| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, lengths 67–124k, ⌀) | cleaned_prompt (string, lengths 168–30.3k, ⌀) |
|---|---|---|
7,500
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
%matplotlib inline
mu,sigma=3,0.1
x=np.random.normal(mu,sigma,1000)
y=np.random.normal(mu,sigma,1000)
x_0=np.random.normal(2,sigma,1000)
y_0=np.random.normal(2,sigma,1000)
X_train_normal=np.ndarray(shape=(2000,2))
for i in range(0,2000):
if (i<1000):
X_train_normal[i]=[x[i],y[i]]
else:
X_train_normal[i]=[x_0[i-1000],y_0[i-1000]]
print(X_train_normal)
p_x=X_train_normal[:,0]
p_y=X_train_normal[:,1]
print(p_y)
plt.scatter(p_x,p_y)
plt.show()
X_test_normal=np.concatenate((0.1*np.random.randn(100,2)+2,0.1*np.random.randn(100,2)+3))
plt.scatter(X_test_normal[:,0],X_test_normal[:,1])
X_test_uniform=np.random.rand(100,2)+3
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1])
model = svm.OneClassSVM()
model.fit(X_train_normal)
predicted=model.predict(X_test_normal)-1
print(np.count_nonzero(predicted))
uniform=model.predict(X_test_uniform)-1
print(np.count_nonzero(uniform))
trained=model.predict(X_train_normal)-1
print(np.count_nonzero(trained))
new_model=svm.OneClassSVM(nu=0.1)
new_model.fit(X_train_normal)
new_predicted=new_model.predict(X_test_normal)-1
new_uniform=new_model.predict(X_test_uniform)-1
new_trained=new_model.predict(X_train_normal)-1
print(np.count_nonzero(new_trained))
print(np.count_nonzero(new_predicted))
print(np.count_nonzero(new_uniform))
plt.scatter(X_train_normal[:,0],X_train_normal[:,1],color='blue')
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1],color='black')
plt.scatter(X_test_normal[:,0],X_test_normal[:,1],color='red')
xx1, yy1 = np.meshgrid(np.linspace(1.5, 4, 1000), np.linspace(1.5, 4,1000))
Z1 =model.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
plt.contour(xx1, yy1, Z1, levels=[0],
linewidths=2)
plt.scatter(X_train_normal[:,0],X_train_normal[:,1],color='blue')
plt.scatter(X_test_uniform[:,0],X_test_uniform[:,1],color='black')
plt.scatter(X_test_normal[:,0],X_test_normal[:,1],color='red')
xx1, yy1 = np.meshgrid(np.linspace(1.5, 4, 1000), np.linspace(1.5, 4,1000))
Z1 =new_model.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
plt.contour(xx1, yy1, Z1, levels=[0],
linewidths=2)
from sklearn.covariance import EllipticEnvelope
train_uniform=np.concatenate((X_train_normal,X_test_uniform))
envelope=EllipticEnvelope()
envelope.fit(train_uniform)
envelope.predict(train_uniform)
print(range(100))
plt.scatter(range(100),envelope.mahalanobis(X_test_uniform),color='black')
plt.scatter(range(2000),envelope.mahalanobis(X_train_normal),color='blue')
plt.scatter(range(200),envelope.mahalanobis(X_test_normal),color='red')
plt.show()
[matplotlib scatter plot output omitted: base64-encoded PNG]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use the np.random module to generate a normal distribution of 1,000 data points in two dimensions (e.g. x, y) - choose whatever mean and sigma^2 you like. Generate another 1,000 data points with a normal distribution in two dimensions that are well separated from the first set. You now have two "clusters". Concatenate them so you have 2,000 data points in two dimensions. Plot the points. This will be the training set.
Step2: Plot the points.
Step3: Generate 100 data points with the same distribution as your first random normal 2-d set, and 100 data points with the same distribution as your second random normal 2-d set. This will be the test set labeled X_test_normal.
Step4: Generate 100 data points with a random uniform distribution. This will be the test set labeled X_test_uniform.
Step5: Define a model classifier with the svm.OneClassSVM
Step6: Fit the model to the training data.
Step7: Use the trained model to predict whether X_test_normal data point are in the same distributions. Calculate the fraction of "false" predictions.
Step8: Use the trained model to predict whether X_test_uniform is in the same distribution. Calculate the fraction of "false" predictions.
Step9: Use the trained model to see how well it recovers the training data. (Predict on the training data, and calculate the fraction of "false" predictions.)
Step10: Create another instance of the model classifier, but change the kwarg value for nu.
Step11: Redo the prediction on the training set, on X_test_normal, and on X_test_uniform.
Step12: Plot in scatter points the X_train in blue, X_test_normal in red, and X_test_uniform in black. Overplot the trained model decision function boundary for the first instance of the model classifier.
Step13: Do the same for the second instance of the model classifier.
Step14: Test how well EllipticEnvelope predicts the outliers when you concatenate the training data with the X_test_uniform data.
Step15: Compute and plot the Mahalanobis distances of X_test_uniform, X_train_normal, and X_test_normal.
|
7,501
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
# create a new git repository called gitdemo:
!git init gitdemo
!git clone https://github.com/qutip/qutip
!git clone gitdemo gitdemo2
!git status
%%file README
A file with information about the gitdemo repository.
!git status
!git add README
!git status
!git commit -m "Added a README file" README
!git add Lecture-7-Revision-Control-Software.ipynb
!git commit -m "added notebook file" Lecture-7-Revision-Control-Software.ipynb
!git status
%%file README
A file with information about the gitdemo repository.
A new line.
!git status
!git commit -m "added one more line in README" README
!git status
%%file tmpfile
A short-lived file.
!git add tmpfile
!git commit -m "adding file tmpfile" tmpfile
!git rm tmpfile
!git commit -m "remove file tmpfile" tmpfile
!git log
%%file README
A file with information about the gitdemo repository.
README files usually contain installation instructions, and information about how to get started using the software (for example).
!git diff README
Image(filename='images/github-diff.png')
!git checkout -- README
!git status
!git log
!git checkout 1f26ad648a791e266fbb951ef5c49b8d990e6461
!cat README
!git checkout master
!cat README
!git status
!git log
!git tag -a demotag1 -m "Code used for this and that purpose"
!git tag -l
!git show demotag1
!git branch expr1
!git branch
!git checkout expr1
%%file README
A file with information about the gitdemo repository.
README files usually contain installation instructions, and information about how to get started using the software (for example).
Experimental addition.
!git commit -m "added a line in expr1 branch" README
!git branch
!git checkout master
!git branch
!git checkout master
!git merge expr1
!git branch
!git branch -d expr1
!git branch
!cat README
!git remote
!git remote show origin
!git pull origin
!git status
!git add Lecture-7-Revision-Control-Software.ipynb
!git commit -m "added lecture notebook about RCS" Lecture-7-Revision-Control-Software.ipynb
!git push
Image(filename='images/github-project-page.png')
Image(filename='images/gitk.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In any software development, one of the most important tools is revision control software (RCS).
Step2: If we want to fork or clone an existing repository, we can use the command git clone repository.
Step3: Git clone can take a URL to a public repository, like above, or a path to a local directory
Step4: We can also clone private repositories over secure protocols such as SSH
Step5: In this case, only the current IPython notebook has been added. It is listed as an untracked file, and is therefore not in the repository yet.
Step6: After having added the file README, the command git status list it as an untracked file.
Step7: Now that it has been added, it is listed as a new file that has not yet been committed to the repository.
Step8: After committing the change to the repository from the local working directory, git status again reports that working directory is clean.
Step9: Again, we can commit such changes to the repository using the git commit -m "message" command.
Step10: Removing files
Step11: Add it
Step12: Remove it again
Step13: Commit logs
Step14: In the commit log, each revision is shown with a timestamp, a unique hash tag, author information, and the commit message.
Step15: That looks quite cryptic but is a standard form for describing changes in files. We can use other tools, like graphical user interfaces or web-based systems, to get a more easily understandable diff.
Step16: Discard changes in the working directory
Step17: Checking out old revisions
Step18: Now the content of all the files is like in the revision with the hash code listed above (first revision).
Step19: We can move back to "the latest" (master) with the command
Step20: Tagging and branching
Step21: To retrieve the code in the state corresponding to a particular tag, we can use the git checkout tagname command.
Step22: We can list the existing branches like this
Step23: And we can switch between branches using checkout
Step24: Make a change in the new branch.
Step25: We can merge an existing branch and all its changesets into another branch (for example the master branch) like this
Step26: We can delete the branch expr1 now that it has been merged into the master
Step27: pulling and pushing changesets between repositories
Step28: pull
Step29: We can register addresses of many different repositories and pull in different changesets from different sources, but the default source is the origin from where the repository was first cloned (and the word origin could have been omitted from the line above).
Step30: Hosted repositories
Step31: Graphical user interfaces
|
7,502
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display, SVG
s = """
<svg width="100" height="100">
<circle cx="50" cy="50" r="20" fill="aquamarine" />
</svg>
"""
SVG(s)
def draw_circle(width=100, height=100, cx=25, cy=25, r=5, fill='red'):
    """Draw an SVG circle.

    Parameters
    ----------
    width : int
        The width of the svg drawing area in px.
    height : int
        The height of the svg drawing area in px.
    cx : int
        The x position of the center of the circle in px.
    cy : int
        The y position of the center of the circle in px.
    r : int
        The radius of the circle in px.
    fill : str
        The fill color of the circle.
    """
    s = '<svg width="%d" height="%d"> <circle cx="%d" cy="%d" r="%d" fill="%s" /> </svg>' % (width, height, cx, cy, r, fill)
    f = SVG(s)
    return display(f)
draw_circle(cx=10, cy=10, r=10, fill='blue')
assert True # leave this to grade the draw_circle function
w=interactive(draw_circle, width=fixed(300), height=fixed(300), cx=(0,300), cy=(0,300), r=(0,50), fill= 'red')
c = w.children
assert c[0].min==0 and c[0].max==300
assert c[1].min==0 and c[1].max==300
assert c[2].min==0 and c[2].max==50
assert c[3].value=='red'
display(w)
assert True # leave this to grade the display of the widget
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact with SVG display
Step4: Write a function named draw_circle that draws a circle using SVG. Your function should take the parameters of the circle as function arguments and have defaults as shown. You will have to write the raw SVG code as a Python string and then use the IPython.display.SVG object and IPython.display.display function.
Step5: Use interactive to build a user interface for exploring the draw_circle function.
Step6: Use the display function to show the widgets created by interactive
|
7,503
|
<ASSISTANT_TASK:>
Python Code:
import nltk
# nltk.download()
mycorpus = nltk.corpus.reuters
n_docs = 500000
filenames = mycorpus.fileids()
fn_train = [f for f in filenames if f[0:5]=='train']
corpus_text = [mycorpus.raw(f) for f in fn_train]
# Reduced dataset:
n_docs = min(n_docs, len(corpus_text))
corpus_text = [corpus_text[n] for n in range(n_docs)]
print 'Loaded {0} files'.format(len(corpus_text))
corpusRDD = sc.parallelize(corpus_text, 4)
print '\nRDD created with {0} elements'.format(corpusRDD.count())
# Compute RDD replacing tokens by token_ids
corpus_sparseRDD = corpus_wcRDD2.map(lambda x: [(invD[t[0]], t[1]) for t in x])
# Convert list of tuplas into Vectors.sparse object.
corpus_sparseRDD = corpus_sparseRDD.map(lambda x: Vectors.sparse(n_tokens, x))
corpus4lda = corpus_sparseRDD.zipWithIndex().map(lambda x: [x[1], x[0]]).cache()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To avoid memory-overload or processing-time problems, you can reduce the size of the corpus by modifying the value of the variable n_docs below.
Step2: Next we load the data into an RDD.
Step3: 1. Exercises
|
7,504
|
<ASSISTANT_TASK:>
Python Code:
import gzip
import pickle
from os import path
from collections import defaultdict
from numpy import sign
def load_buzz(root='../data', data=['train', 'test', 'questions'], format='pklz'):
    """Load buzz data as a dictionary.

    You can give a parameter for data so that you will get only what you need.
    """
    buzz_data = {}
    for ii in data:
        file_path = path.join(root, ii + "." + format)
        with gzip.open(file_path, "rb") as fp:
            buzz_data[ii] = pickle.load(fp)
    return buzz_data
from numpy import sign, abs
def _feat_basic(bd, group):
X = []
for item in bd[group].items():
qid = item[1]['qid']
q = bd['questions'][qid]
#item[1]['q_length'] = max(q['pos_token'].keys())
item[1]['q_length'] = len(q['question'].split())
item[1]['category'] = q['category'].lower()
item[1]['answer'] = q['answer'].lower()
X.append(item[1])
return X
def _feat_sign_val(data):
for item in data:
item['sign_val'] = sign(item['position'])
def _get_pos(bd, sign_val=None):
# bd is not bd, bd is bd['train']
unwanted_index = []
pos_uid = defaultdict(list)
pos_qid = defaultdict(list)
for index, key in enumerate(bd):
if sign_val and sign(bd[key]['position']) != sign_val:
unwanted_index.append(index)
else:
pos_uid[bd[key]['uid']].append(bd[key]['position'])
pos_qid[bd[key]['qid']].append(bd[key]['position'])
return pos_uid, pos_qid, unwanted_index
def _get_avg_pos(bd, sign_val=None):
pos_uid, pos_qid, unwanted_index = _get_pos(bd, sign_val)
avg_pos_uid = {}
avg_pos_qid = {}
if not sign_val:
sign_val = 1
for key in pos_uid:
pos = pos_uid[key]
avg_pos_uid[key] = sign_val * (sum(pos) / len(pos))
for key in pos_qid:
pos = pos_qid[key]
avg_pos_qid[key] = sign_val * (sum(pos) / len(pos))
return avg_pos_uid, avg_pos_qid, unwanted_index
def _feat_avg_pos(data, bd, group, sign_val):
avg_pos_uid, avg_pos_qid, unwanted_index = _get_avg_pos(bd['train'], sign_val=sign_val)
if group == 'train':
for index in sorted(unwanted_index, reverse=True):
del data[index]
for item in data:
if item['uid'] in avg_pos_uid:
item['avg_pos_uid'] = avg_pos_uid[item['uid']]
else:
vals = avg_pos_uid.values()
item['avg_pos_uid'] = sum(vals) / float(len(vals))
if item['qid'] in avg_pos_qid:
item['avg_pos_qid'] = avg_pos_qid[item['qid']]
else:
vals = avg_pos_qid.values()
item['avg_pos_qid'] = sum(vals) / float(len(vals))
# Response position can be longer than length of question
if item['avg_pos_uid'] > item['q_length']:
item['avg_pos_uid'] = item['q_length']
if item['avg_pos_qid'] > item['q_length']:
item['avg_pos_qid'] = item['q_length']
def featurize(bd, group, sign_val=None, extra=None):
# Basic features
# qid(string), uid(string), position(float)
# answer'(string), 'potistion'(float), 'qid'(string), 'uid'(string)
X = _feat_basic(bd, group=group)
# Some extra features
if extra:
for func_name in extra:
func_name = '_feat_' + func_name
if func_name in ['_feat_avg_pos']:
globals()[func_name](X, bd, group=group, sign_val=sign_val)
else:
globals()[func_name](X)
if group == 'train':
y = []
for item in X:
y.append(item['position'])
del item['position']
return X, y
elif group == 'test':
return X
else:
raise ValueError(group, 'is not the proper type')
import csv
def select(data, keys):
unwanted = data[0].keys() - keys
for item in data:
for unwanted_key in unwanted:
del item[unwanted_key]
return data
def write_result(test_set, predictions, file_name='guess.csv'):
predictions = sorted([[id, predictions[index]] for index, id in enumerate(test_set.keys())])
predictions.insert(0,["id", "position"])
with open(file_name, "w") as fp:
writer = csv.writer(fp, delimiter=',')
writer.writerows(predictions)
%matplotlib inline
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
def plot_gmm(X, models, n_components, covariance_type='diag',
figsize=(10, 20), suptitle=None, xlabel=None, ylabel=None):
color_iter = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'gray', 'pink', 'lime']
plt.figure(figsize=figsize)
plt.suptitle(suptitle, fontsize=20)
for i, model in enumerate(models):
mm = getattr(mixture, model)(n_components=n_components,
covariance_type=covariance_type)
mm.fit(X_pos_qid)
Y = mm.predict(X_pos_qid)
plt.subplot(len(models), 1, 1 + i)
for i, color in enumerate(color_iter):
plt.scatter(X_pos_qid[Y == i, 0], X_pos_qid[Y == i, 1], .7, color=color)
plt.title(model, fontsize=15)
plt.xlabel(xlabel, fontsize=12)
plt.ylabel(ylabel, fontsize=12)
plt.grid()
plt.show()
from collections import UserDict
import numpy as np
class DictDict(UserDict):
def __init__(self, bd):
UserDict.__init__(self)
self._set_bd(bd)
def sub_keys(self):
return self[list(self.keys())[0]].keys()
def select(self, sub_keys):
vals = []
for key in self:
vals.append([self[key][sub_key] for sub_key in sub_keys])
return np.array(vals)
def sub_append(self, sub_key, values):
for index, key in enumerate(self):
self[key][sub_key] = values[index]
class Users(DictDict):
def _set_bd(self, bd):
pos_uid, _, _ = _get_pos(bd['train'], sign_val=None)
for key in pos_uid:
u = np.array(pos_uid[key])
ave_pos_uid = sum(abs(u)) / float(len(u))
acc_ratio_uid = len(u[u > 0]) / float(len(u))
self[key] = {'ave_pos_uid': ave_pos_uid,
'acc_ratio_uid': acc_ratio_uid}
class Questions(DictDict):
def _set_bd(self, bd):
_, pos_qid, _ = _get_pos(bd['train'], sign_val=None)
for key in pos_qid:
u = np.array(pos_qid[key])
ave_pos_qid = sum(abs(u)) / float(len(u))
acc_ratio_qid = len(u[u > 0]) / float(len(u))
self[key] = bd['questions'][key]
self[key]['ave_pos_qid'] = ave_pos_qid
self[key]['acc_ratio_qid'] = acc_ratio_qid
users = Users(load_buzz())
questions = Questions(load_buzz())
X_pos_uid = users.select(['ave_pos_uid', 'acc_ratio_uid'])
X_pos_qid = questions.select(['ave_pos_qid', 'acc_ratio_qid'])
plot_gmm(X_pos_uid,
models=['GMM', 'VBGMM', 'DPGMM'],
n_components=8,
covariance_type='diag',
figsize=(10, 20),
suptitle='Classifying users',
xlabel='abs(position)',
ylabel='accuracy ratio')
plot_gmm(X_pos_qid,
models=['GMM', 'VBGMM', 'DPGMM'],
n_components=8,
covariance_type='diag',
figsize=(10, 20),
suptitle='Classifying questions',
xlabel='abs(position)',
ylabel='accuracy ratio')
# Question category
n_components = 8
gmm = mixture.GMM(n_components=n_components, covariance_type='diag')
gmm.fit(X_pos_qid)
pred_cat_qid = gmm.predict(X_pos_qid)
plt.hist(pred_cat_qid, bins=50, facecolor='g', alpha=0.75)
plt.xlabel("Category number")
plt.ylabel("Count")
plt.title("Question Category: " + str(n_components) + " categories")
plt.grid(True)
plt.show()
# User category
n_components = 8
gmm = mixture.GMM(n_components=n_components, covariance_type='diag')
gmm.fit(X_pos_uid)
pred_cat_uid = gmm.predict(X_pos_uid)
plt.hist(pred_cat_uid, bins=50, facecolor='g', alpha=0.75)
plt.xlabel("Category number")
plt.ylabel("Count")
plt.title("User Category: " + str(n_components) + " categories")
plt.grid(True)
plt.show()
users.sub_append('cat', [str(x) for x in pred_cat_uid])
questions.sub_append('cat', [str(x) for x in pred_cat_qid])
print(users[1])
print(questions[1])
regression_keys = ['category', 'q_length', 'qid', 'uid', 'answer', 'avg_pos_uid', 'avg_pos_qid']
X_train, y_train = featurize(load_buzz(), group='train', sign_val=None, extra=['sign_val', 'avg_pos'])
X_train = select(X_train, regression_keys)
for index, item in enumerate(X_train):
uid = item['uid']
qid = item['qid']
item['acc_ratio_uid'] = users[uid]['acc_ratio_uid']
item['acc_ratio_qid'] = questions[qid]['acc_ratio_qid']
item['uid'] = str(uid)
item['qid'] = str(qid)
X_train[1]
import multiprocessing
from sklearn import linear_model
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.feature_extraction import DictVectorizer
import math
from numpy import abs, sqrt
vec = DictVectorizer()
X_train = vec.fit_transform(X_train)
regressor_names = """
LinearRegression
Ridge
Lasso
ElasticNet
"""
print ("=== Linear Cross validation RMSE scores:")
for regressor in regressor_names.split():
scores = cross_val_score(getattr(linear_model, regressor)(),
X_train, y_train,
cv=10,
scoring='mean_squared_error',
n_jobs=multiprocessing.cpu_count()-1
)
print (regressor, sqrt(abs(scores)).mean())
def transform(X):
for index, item in enumerate(X):
uid = item['uid']
qid = item['qid']
item['uid'] = str(uid)
item['qid'] = str(qid)
# uid
if uid in users:
item['acc_ratio_uid'] = users[uid]['acc_ratio_uid']
else:
acc = users.select(['acc_ratio_uid'])
item['acc_ratio_uid'] = sum(acc) / float(len(acc))
# qid
if qid in questions:
item['acc_ratio_qid'] = questions[qid]['acc_ratio_qid']
else:
acc = questions.select(['acc_ratio_qid'])
item['acc_ratio_qid'] = sum(acc) / float(len(acc))
regression_keys = ['category', 'q_length', 'qid', 'uid', 'answer', 'avg_pos_uid', 'avg_pos_qid']
X_train, y_train = featurize(load_buzz(), group='train', sign_val=None, extra=['avg_pos'])
X_train = select(X_train, regression_keys)
X_test = featurize(load_buzz(), group='test', sign_val=None, extra=['avg_pos'])
X_test = select(X_test, regression_keys)
transform(X_train)
transform(X_test)
X_train[1]
X_test[1]
vec = DictVectorizer()
vec.fit(X_train + X_test)
X_train = vec.transform(X_train)
X_test = vec.transform(X_test)
regressor = linear_model.LassoCV(n_jobs=3, normalize=True)
regressor.fit(X_train, y_train)
print(regressor.coef_)
print(regressor.alpha_)
predictions = regressor.predict(X_test)
write_result(load_buzz()['test'], predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model10
Step2: Feature functions (private)
Step3: Feature function (public)
Step4: Utility functions
Step5: GMM
Step7: B. Modeling
Step8: Training and testing the model
Step9: Writing the result
7,505
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import os
import numpy as np
os.environ['DES_BACKEND'] = 'numpy'
import desolver as de
import desolver.backend as D
D.set_float_fmt('gdual_real128')
# Definition of the controller in terms of some weights parameters
def u(state, weights):
x,v = state
a,b,c,e,f,g = weights
return a + b*x + c*v + e*x*v + f*x**2 + g*v**2
# Definition of the equation of motion (our physics simulator propagating the system to its next state)
def eom(state, weights):
x,v = state
dx = v
dv = u(state, weights)
return (dx, dv)
weights = D.array([D.gdual_real128(0.05*(np.random.uniform()-0.5), _, 4) for _ in "abcefg"])
x = [D.gdual_real128(2*(np.random.uniform()-0.5))]
v = [D.gdual_real128(2*(np.random.uniform()-0.5))]
y0 = D.array(x + v, dtype=D.gdual_real128)
def rhs(t, state, weights, **kwargs):
return D.array(eom(state, weights))
pyaudi_integration = de.OdeSystem(rhs, y0=y0, dense_output=False, t=(0, 10.), dt=0.1, rtol=1e-16, atol=1e-16, constants=dict(weights=weights))
pyaudi_integration.set_method("RK87")
pyaudi_integration.integrate(eta=True)
x,v = pyaudi_integration.y.T
plt.plot([it for it in x.astype(np.float64)],[it for it in v.astype(np.float64)])
plt.plot(x.astype(np.float64)[0], v.astype(np.float64)[0], 'ro')
plt.show()
xf, vf = x[-1], v[-1]
print("initial xf: {}".format(xf.constant_cf))
print("initial vf: {}".format(vf.constant_cf))
dweights = dict({'da': -0.0002, 'db': 0.0003, 'dc': -0.002, 'de': 0.003, 'df': 0.002, 'dg': -0.001})
#Lets predict the new final position of our 'robot' if we change his controller as defined above
print("new xf: {}".format(xf.evaluate(dweights)))
print("new vf: {}".format(vf.evaluate(dweights)))
new_weights = np.array([it + dweights['d' + it.symbol_set[0]] for it in weights])
pyaudi_integration2 = de.OdeSystem(rhs, y0=y0, dense_output=True, t=(0, 10.), dt=0.01, rtol=1e-16, atol=1e-16, constants=dict(weights=new_weights))
pyaudi_integration2.set_method("RK87")
pyaudi_integration2.integrate(eta=True)
plt.figure(figsize=(8,8))
x2, v2 = pyaudi_integration2.y.T
plt.plot([it for it in x.astype(np.float64)],[it for it in v.astype(np.float64)],label='original')
plt.plot([it for it in x2.astype(np.float64)],[it for it in v2.astype(np.float64)],'r',label='simulation')
plt.plot([float(str(it.evaluate(dweights))) for it in x],[float(str(it.evaluate(dweights))) for it in v],'g',label='differential learning')
plt.plot(x.astype(np.float64)[0], v.astype(np.float64)[0], 'ro')
plt.legend(loc=2)
plt.show()
print("Differential learning xf: \t{}".format(x[-1].evaluate(dweights)))
print("Real xf: \t\t\t{}".format(x2[-1].constant_cf))
print("Mean Absolute Difference xf:\t{}".format(x2[-1].constant_cf - x[-1].evaluate(dweights)))
print()
print("Differential learning vf: \t{}".format(v[-1].evaluate(dweights)))
print("Real vf: \t\t\t{}".format(v2[-1].constant_cf))
print("Mean Absolute Difference vf:\t{}".format(v2[-1].constant_cf - v[-1].evaluate(dweights)))
pyaudi_integration3 = de.OdeSystem(rhs, y0=y0, dense_output=True, t=(0, 10.), dt=0.01, rtol=1e-16, atol=1e-16, constants=dict(weights=new_weights))
pyaudi_integration3.set_method("RK45")
pyaudi_integration3.integrate(eta=True)
x3, v3 = pyaudi_integration3.y.T
print("Difference between RK45 and RK87 xf: \t{}".format((x2[-1] - x3[-1]).constant_cf))
print("Difference between RK45 and RK87 vf: \t{}".format((v2[-1] - v3[-1]).constant_cf))
print()
print("Mean Absolute Difference xf[RK87]:\t{}".format(x2[-1].constant_cf - x[-1].evaluate(dweights)))
print("Mean Absolute Difference xf[RK45]:\t{}".format(x3[-1].constant_cf - x[-1].evaluate(dweights)))
print()
print("Mean Absolute Difference vf[RK87]:\t{}".format(v2[-1].constant_cf - v[-1].evaluate(dweights)))
print("Mean Absolute Difference vf[RK45]:\t{}".format(v3[-1].constant_cf - v[-1].evaluate(dweights)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Controller representation and “simulator”
Step2: Numerical Integration - Runge-Kutta 8(7) Dormand-Prince Method
Step3: We integrate the system using the Runge-Kutta 8(7) scheme [1] as the numerical integrator, with a dense output computed using a piecewise C1 Hermite interpolating spline.
Step4: Studying the effects of the weights on the behavior
Step5: Check that we learned the correct map.
|
7,506
|
<ASSISTANT_TASK:>
Python Code:
#hide
! pip install -U git+git://github.com/fastai/fastcore@master
! pip install -U git+git://github.com/fastai/nbdev@master
! pip install -U numpy
from fastcore.foundation import *
from fastcore.meta import *
from fastcore.utils import *
from fastcore.test import *
from nbdev.showdoc import *
from fastcore.dispatch import typedispatch
from functools import partial
import numpy as np
import inspect
def baz(a, b=2, c =3, d=4): return a + b + c
def foo(c, a, **kwargs):
return c + baz(a, **kwargs)
inspect.signature(foo)
def baz(a, b=2, c =3, d=4): return a + b + c
@delegates(baz) # this decorator will pass down keyword arguments from baz
def foo(c, a, **kwargs):
return c + baz(a, **kwargs)
inspect.signature(foo)
@delegates(baz, keep=True)
def foo(c, a, **kwargs):
return c + baz(a, **kwargs)
inspect.signature(foo)
def basefoo(a, b=2, c =3, d=4): pass
@delegates(basefoo, but= ['d']) # exclude `d`
def foo(c, a, **kwargs): pass
inspect.signature(foo)
class BaseFoo:
def __init__(self, e, c=2): pass
@delegates()# since no argument was passsed here we delegate to the superclass
class Foo(BaseFoo):
def __init__(self, a, b=1, **kwargs): super().__init__(**kwargs)
inspect.signature(Foo)
class Test:
def __init__(self, a, b ,c):
self.a, self.b, self.c = a, b, c
class Test:
def __init__(self, a, b, c):
store_attr()
t = Test(5,4,3)
assert t.b == 4
class Test:
def __init__(self, a, b, c):
store_attr(but=['c'])
t = Test(5,4,3)
assert t.b == 4
assert not hasattr(t, 'c')
class ParentClass:
def __init__(self): self.some_attr = 'hello'
class ChildClass(ParentClass):
def __init__(self):
super().__init__()
cc = ChildClass()
assert cc.some_attr == 'hello' # only accessible b/c you used super
class NewParent(ParentClass, metaclass=PrePostInitMeta):
def __pre_init__(self, *args, **kwargs): super().__init__()
class ChildClass(NewParent):
def __init__(self):pass
sc = ChildClass()
assert sc.some_attr == 'hello'
@typedispatch
def f(x:str, y:str): return f'{x}{y}'
@typedispatch
def f(x:np.ndarray): return x.sum()
@typedispatch
def f(x:int, y:int): return x+y
f('Hello ', 'World!')
f(2,3)
f(np.array([5,5,5,5]))
test_input = [1,2,3,4,5,6]
def f(arr, val):
"Filter a list to remove any values that are less than val."
return [x for x in arr if x >= val]
f(test_input, 3)
filter5 = partial(f, val=5)
filter5(test_input)
filter5.__doc__
filter5 = partialler(f, val=5)
filter5.__doc__
def add(arr, val): return [x + val for x in arr]
def arrsum(arr): return sum(arr)
# See the previous section on partialler
add2 = partialler(add, val=2)
transform = compose(filter5, add2, arrsum)
transform([1,2,3,4,5,6])
def fit(x, transforms:list):
"fit a model after performing transformations"
x = compose(*transforms)(x)
y = [np.mean(x)] * len(x) # its a dumb model. Don't judge me
return y
# filters out elements < 5, adds 2, then predicts the mean
fit(x=[1,2,3,4,5,6], transforms=[filter5, add2])
class Test:
def __init__(self, a, b=2, c=3): store_attr() # `store_attr` was discussed previously
Test(1)
class Test:
def __init__(self, a, b=2, c=3): store_attr()
__repr__ = basic_repr('a,b,c')
Test(2)
class MyClass(int): pass
@patch
def func(self:MyClass, a): return self+a
mc = MyClass(3)
mc.func(10)
from fastcore.utils import *
from pathlib import Path
p = Path('.')
p.ls() # you don't get this with vanilla Pathlib.Path!!
@patch
def fun(self:Path): return "This is fun!"
p.fun()
arr=np.array([5,4,3,2,1])
f = lambda a: a.sum()
assert f(arr) == 15
f = Self.sum()
assert f(arr) == 15
import pandas as pd
df=pd.DataFrame({'Some Column': ['a', 'a', 'b', 'b', ],
'Another Column': [5, 7, 50, 70]})
f = Self.groupby('Some Column').mean()
f(df)
from fastcore.imports import in_notebook, in_colab, in_ipython
in_notebook(), in_colab(), in_ipython()
L(1,2,3)
p = L.range(20).shuffle()
p
p[2,4,6]
1 + L(2,3,4)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Making **kwargs transparent
Step2: Without reading the source code, it might be hard for me to know that foo also accepts the additional parameters b and d. We can fix this with delegates.
Step3: You can customize the behavior of this decorator. For example, you can have your cake and eat it too by passing down your arguments and also keeping **kwargs
Step4: You can also exclude arguments. For example, we exclude argument d from delegation
Step5: You can also delegate between classes
Step6: For more information, read the docs on delegates.
Step7: Ouch! That was painful. Look at all the repeated variable names. Do I really have to repeat myself like this when defining a class? Not Anymore! Checkout store_attr
Step8: You can also exclude certain attributes
Step9: There are many more ways of customizing and using store_attr than I highlighted here. Check out the docs for more detail.
Step10: We can avoid this boilerplate by using the metaclass PrePostInitMeta. We define a new class called NewParent that is a wrapper around the ParentClass
Step11: Type Dispatch
Step12: Below is a demonstration of type dispatch at work for the function f
Step13: There are limitations of this feature, as well as other ways of using this functionality, that you can read about here. In the process of learning about typed dispatch, I also found a Python library called multipledispatch made by Matthew Rocklin (the creator of Dask).
Step14: You can create a new function out of this function using partial that sets the default value to 5
Step15: One problem with partial is that it removes the original docstring and replaces it with a generic docstring
Step16: fastcore.utils.partialler fixes this, and makes sure the docstring is retained so that the new API is transparent.
Step17: Composition of functions
Step18: But why is this useful? You might me thinking, I can accomplish the same thing with
Step19: For more information about compose, read the docs.
Step20: We can use basic_repr to quickly give us a more sensible default
Step21: Monkey Patching With A Decorator
Step22: Now, MyClass has an additional method named func
Step23: Still not convinced? I'll show you another example of this kind of patching in the next section.
Step24: Wait! What's going on here? We just imported pathlib.Path - why are we getting this new functionality? Thats because we imported the fastcore.utils module, which patches this module via the @patch decorator discussed earlier. Just to drive the point home on why the @patch decorator is useful, I'll go ahead and add another method to Path right now
Step25: That is magical, right? I know! That's why I'm writing about it!
Step26: You can use Self in the same way
Step27: Let's create a lambda that does a groupby and mean of a Pandas dataframe.
Step28: Read more about Self in the docs.
Step29: This is useful if you are displaying certain types of visualizations, progress bars or animations in your code that you may want to modify or toggle depending on the environment.
Step30: Shuffle a list
Step31: Index into a list
Step32: L has sensible defaults, for example appending an element to a list
|
7,507
|
<ASSISTANT_TASK:>
Python Code:
def write_answer_to_file(answer, filename):
with open(filename, 'w') as f_out:
f_out.write(str(round(answer, 3)))
import pandas as pd
adver_data = pd.read_csv('advertising.csv')
# Ваш код здесь
adver_data.head(5)
# Your code here
# describe() generates summary statistics, excluding NaN values.
print adver_data.describe()
X = adver_data[['TV','Radio','Newspaper']].values
y = adver_data[['Sales']].values
print X[0]
print y[0]
import numpy as np
# mean - the mean
# std - the standard deviation
means, stds = np.mean(X, axis=0), np.std(X, axis=0)
X = (X - means)/stds
# append a column of ones to X, with X.shape[0] rows
X = np.hstack([X, np.ones((X.shape[0], 1))])
X[0]
def mserror(y, y_pred):
    # Your code here
    # sum(iter, start=0) - sum of the terms of a sequence.
    return sum((y - y_pred)**2, 0)/y.shape[0]
# median value of Sales
med = np.array([np.median(y)]*X.shape[0])
med = med.reshape(X.shape[0], 1)
answer1 = mserror(y, med)
print(round(answer1, 3))
write_answer_to_file(round(answer1,3), '1.txt')
def normal_equation(X, y):
ans = np.dot(np.dot(np.linalg.pinv(np.dot(X.T,X)),X.T),y)
return ans
norm_eq_weights = normal_equation(X, y)
print(norm_eq_weights)
# Returns the mean of the array elements, column-wise
average_value = np.mean(X, axis=0)
answer2 = np.dot(average_value,norm_eq_weights)
print(round(answer2,3))
write_answer_to_file(round(answer2,3), '2.txt')
def linear_prediction(X, w):
return np.dot(X,w)
answer3 = normal_equation(y,linear_prediction(X,norm_eq_weights))
print(round(answer3,3))
write_answer_to_file(round(answer3,3), '3.txt')
def stochastic_gradient_step(X, y, w, train_ind, eta=0.01):
grad0 = w[0]-(2.0*eta)/X.shape[0]*X[train_ind,0]*((w[0]*X[train_ind,0]+w[1]*X[train_ind,1]+w[2]*X[train_ind,2]+w[3]*X[train_ind,3])-y[train_ind])
grad1 = w[1]-(2.0*eta)/X.shape[0]*X[train_ind,1]*((w[0]*X[train_ind,0]+w[1]*X[train_ind,1]+w[2]*X[train_ind,2]+w[3]*X[train_ind,3])-y[train_ind])
grad2 = w[2]-(2.0*eta)/X.shape[0]*X[train_ind,2]*((w[0]*X[train_ind,0]+w[1]*X[train_ind,1]+w[2]*X[train_ind,2]+w[3]*X[train_ind,3])-y[train_ind])
grad3 = w[3]-(2.0*eta)/X.shape[0]*X[train_ind,3]*((w[0]*X[train_ind,0]+w[1]*X[train_ind,1]+w[2]*X[train_ind,2]+w[3]*X[train_ind,3])-y[train_ind])
return np.array([grad0, grad1, grad2, grad3])
def stochastic_gradient_descent(X, y, w_init, eta=1e-2, max_iter=1e4,
                                min_weight_dist=1e-8, seed=42, verbose=False):
    # Initialize the distance between weight vectors on consecutive
    # iterations with a large number.
    weight_dist = np.inf
    # Initialize the weight vector
    w = w_init
    # Errors at each iteration will be recorded here
    errors = []
    # Iteration counter
    iter_num = 0
    # We will generate pseudo-random numbers
    # (the index of the object that updates the weights); for reproducibility
    # of this pseudo-random sequence we use seed.
    np.random.seed(seed)
    # Main loop
    while weight_dist > min_weight_dist and iter_num < max_iter:
        # generate a pseudo-random
        # index of an object from the training sample
        random_ind = np.random.randint(X.shape[0])
        # Your code here
        old_w = w
        w = stochastic_gradient_step(X, y, w, random_ind, eta=0.01)
        weight_dist = np.linalg.norm(w - old_w)
        errors.append(mserror(y, np.dot(X, w)))
        iter_num += 1
    return w, errors
%%time
stoch_grad_desc_weights, stoch_errors_by_iter = stochastic_gradient_descent(X,
y, [0,0,0,0], 0.01, 100000, 1e-8, 42, False)
%pylab inline
plot(range(50), stoch_errors_by_iter[:50])
xlabel('Iteration number')
ylabel('MSE')
%pylab inline
plot(range(len(stoch_errors_by_iter)), stoch_errors_by_iter)
xlabel('Iteration number')
ylabel('MSE')
stoch_grad_desc_weights
stoch_errors_by_iter[-1]
answer4 = mserror(y, linear_prediction(X, stoch_grad_desc_weights))
print(answer4)
write_answer_to_file(answer4, '4.txt')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load the data from the file advertising.csv into a pandas DataFrame object. Data source.
Step2: Look at the first 5 records and at the feature statistics in this dataset.
Step3: Create NumPy arrays X from the columns TV, Radio and Newspaper, and y from the column Sales. Use the values attribute of the pandas DataFrame object.
Step4: Scale the columns of the matrix X by subtracting the column mean from each value and dividing the result by the standard deviation. For definiteness, use the mean and std methods of NumPy vectors (the std implementation in Pandas may differ). Note that in NumPy calling .mean() without parameters returns the mean over all array elements, not over columns as in pandas. To compute it column-wise, specify the axis parameter.
Step5: Add a column of ones to the matrix X, using the NumPy methods hstack, ones and reshape. The vector of ones is needed so that the coefficient $w_0$ of the linear regression does not have to be handled separately.
Step6: 2. Implement the function mserror - the mean squared error of the prediction. It takes two arguments - the Series objects y (target values) and y_pred (predicted values). Do not use loops in this function - otherwise it will be computationally inefficient.
Step7: What is the mean squared error of predicting Sales if we always predict the median Sales of the original sample? Write the answer to the file '1.txt'.
Step8: 3. Implement the function normal_equation, which, given matrices (NumPy arrays) X and y, computes the weight vector $w$ according to the normal equation of linear regression.
Step9: What sales are predicted by the linear model with weights found via the normal equation, in the case of average advertising investments in TV, radio and newspapers (i.e. with zero values of the scaled features TV, Radio and Newspaper)? Write the answer to the file '2.txt'.
Step10: 4. Write the function linear_prediction, which takes the matrix X and the weight vector w of a linear model, and returns the vector of predictions as a linear combination of the columns of X with weights w.
Step11: What is the mean squared error of the Sales predictions of the linear model with weights found via the normal equation? Write the answer to the file '3.txt'.
Step12: 5. Write the function stochastic_gradient_step, implementing one step of stochastic gradient descent for linear regression. The function must take the matrix X, the vectors y and w, the number train_ind - the index of the training-set object (row of X) used for the weight update - and the number $\eta$ (eta) - the gradient descent step (default eta=0.01). The result is the vector of updated weights. Our implementation is written explicitly for data with 3 features, but it is not hard to modify it for any number of features; you may do so.
Step13: 6. Write the function stochastic_gradient_descent, implementing stochastic gradient descent for linear regression. The function takes the following arguments
Step14: Run $10^5$ iterations of stochastic gradient descent. Use an initial weight vector w_init of zeros. Leave the parameters eta and seed at their default values (eta=0.01, seed=42 - this is important for checking the answers).
Step15: Let us look at the error over the first 50 iterations of stochastic gradient descent. We see that the error does not necessarily decrease at each iteration.
Step16: Now let us look at the error as a function of the iteration number for $10^5$ iterations of stochastic gradient descent. We see that the algorithm converges.
Step17: Let us look at the weight vector the method converged to.
Step18: Let us look at the mean squared error at the last iteration.
Step19: What is the mean squared error of the Sales predictions of the linear model with weights found via gradient descent? Write the answer to the file '4.txt'.
|
7,508
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
def get_pdb_divide_params(frequency, F_BUS=int(48e6)):
mult_factor = np.array([1, 10, 20, 40])
prescaler = np.arange(8)
clock_divide = (pd.DataFrame([[i, m, p, m * (1 << p)]
for i, m in enumerate(mult_factor) for p in prescaler],
columns=['mult_', 'mult_factor', 'prescaler', 'combined'])
.drop_duplicates(subset=['combined'])
.sort_values('combined', ascending=True))
clock_divide['clock_mod'] = (F_BUS / frequency / clock_divide.combined).astype(int)
return clock_divide.loc[clock_divide.clock_mod <= 0xffff]
PDB0_IDLY = 0x4003600C # Interrupt Delay Register
PDB0_SC = 0x40036000 # Status and Control Register
PDB0_MOD = 0x40036004 # Modulus Register
PDB_SC_PDBEIE = 0x00020000 # Sequence Error Interrupt Enable
PDB_SC_SWTRIG = 0x00010000 # Software Trigger
PDB_SC_DMAEN = 0x00008000 # DMA Enable
PDB_SC_PDBEN = 0x00000080 # PDB Enable
PDB_SC_PDBIF = 0x00000040 # PDB Interrupt Flag
PDB_SC_PDBIE = 0x00000020 # PDB Interrupt Enable.
PDB_SC_CONT = 0x00000002 # Continuous Mode Enable
PDB_SC_LDOK = 0x00000001 # Load OK
def PDB_SC_TRGSEL(n): return (((n) & 15) << 8) # Trigger Input Source Select
def PDB_SC_PRESCALER(n): return (((n) & 7) << 12) # Prescaler Divider Select
def PDB_SC_MULT(n): return (((n) & 3) << 2) # Multiplication Factor
def PDB_SC_LDMOD(n): return (((n) & 3) << 18) # Load Mode Select
# PDB0_IDLY = 1; // the PDB interrupt happens when IDLY is equal to CNT+1
proxy.mem_cpy_host_to_device(PDB0_IDLY, np.uint32(1).tostring())
# software trigger enable, PDB continuous
PDB_CONFIG = (PDB_SC_TRGSEL(15) | PDB_SC_PDBEN | PDB_SC_CONT | PDB_SC_LDMOD(0))
PDB0_SC_ = (PDB_CONFIG | PDB_SC_PRESCALER(clock_divide.prescaler) |
PDB_SC_MULT(clock_divide.mult_) |
PDB_SC_DMAEN | PDB_SC_LDOK) # load all new values
proxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())
clock_divide = get_pdb_divide_params(25).iloc[0]
# PDB0_MOD = (uint16_t)(mod-1);
proxy.mem_cpy_host_to_device(PDB0_MOD, np.uint32(clock_divide.clock_mod).tostring())
PDB0_SC_ = (PDB_CONFIG | PDB_SC_PRESCALER(clock_divide.prescaler) |
PDB_SC_DMAEN | PDB_SC_MULT(clock_divide.mult_) |
PDB_SC_SWTRIG) # start the counter!
proxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())
PDB0_SC_ = 0
proxy.mem_cpy_host_to_device(PDB0_SC, np.uint32(PDB0_SC_).tostring())
import arduino_helpers.hardware.teensy as teensy
from arduino_rpc.protobuf import resolve_field_values
from teensy_minimal_rpc import SerialProxy
import teensy_minimal_rpc.DMA as DMA
import teensy_minimal_rpc.ADC as ADC
import teensy_minimal_rpc.SIM as SIM
import teensy_minimal_rpc.PIT as PIT
# Disconnect from existing proxy (if available)
try:
del proxy
except NameError:
pass
proxy = SerialProxy()
proxy.pin_mode(teensy.LED_BUILTIN, 1)
from IPython.display import display
proxy.update_sim_SCGC6(SIM.R_SCGC6(PDB=True))
sim_scgc6 = SIM.R_SCGC6.FromString(proxy.read_sim_SCGC6().tostring())
display(resolve_field_values(sim_scgc6)[['full_name', 'value']].T)
# proxy.update_pit_registers(PIT.Registers(MCR=PIT.R_MCR(MDIS=False)))
# pit_registers = PIT.Registers.FromString(proxy.read_pit_registers().tostring())
# display(resolve_field_values(pit_registers)[['full_name', 'value']].T)
import numpy as np
# CORE_PIN13_PORTSET = CORE_PIN13_BITMASK;
# CORE_PIN13_PORTCLEAR = CORE_PIN13_BITMASK;
#define CORE_PIN13_PORTCLEAR GPIOC_PCOR
#define CORE_PIN13_PORTSET GPIOC_PSOR
#define GPIOC_PCOR (*(volatile uint32_t *)0x400FF088) // Port Clear Output Register
#define GPIOC_PSOR (*(volatile uint32_t *)0x400FF084) // Port Set Output Register
CORE_PIN13_BIT = 5
GPIOC_PCOR = 0x400FF088 # Port Clear Output Register
GPIOC_PSOR = 0x400FF084 # Port Set Output Register
proxy.mem_cpy_host_to_device(GPIOC_PSOR, np.uint32(1 << CORE_PIN13_BIT).tostring())
proxy.update_dma_mux_chcfg(0, DMA.MUX_CHCFG(ENBL=1, TRIG=0, SOURCE=48))
proxy.update_dma_registers(DMA.Registers(SERQ=0))
proxy.update_dma_registers(DMA.Registers(CERQ=0))
resolve_field_values(DMA.MUX_CHCFG.FromString(proxy.read_dma_mux_chcfg(0).tostring()))[['full_name', 'value']]
print proxy.update_pit_timer_config(0, PIT.TimerConfig(LDVAL=int(48e6)))
print proxy.update_pit_timer_config(0, PIT.TimerConfig(TCTRL=PIT.R_TCTRL(TEN=True)))
pit0 = PIT.TimerConfig.FromString(proxy.read_pit_timer_config(0).tostring())
display(resolve_field_values(pit0)[['full_name', 'value']].T)
PIT_LDVAL0 = 0x40037100 # Timer Load Value Register
PIT_CVAL0 = 0x40037104 # Current Timer Value Register
PIT_TCTRL0 = 0x40037108 # Timer Control Register
proxy.mem_cpy_host_to_device(PIT_TCTRL0, np.uint32(1).tostring())
proxy.mem_cpy_device_to_host(PIT_TCTRL0, 4).view('uint32')[0]
proxy.digital_write(teensy.LED_BUILTIN, 0)
proxy.update_dma_registers(DMA.Registers(SSRT=0))
proxy.free_all()
toggle_pin_addr = proxy.mem_alloc(4)
proxy.mem_cpy_host_to_device(toggle_pin_addr, np.uint32(1 << CORE_PIN13_BIT).tostring())
tcds_addr = proxy.mem_aligned_alloc(32, 2 * 32)
hw_tcds_addr = 0x40009000
tcd_addrs = [tcds_addr + 32 * i for i in xrange(2)]
# Create Transfer Control Descriptor configuration for first chunk, encoded
# as a Protocol Buffer message.
tcd0_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._32_BIT,
DSIZE=DMA.R_TCD_ATTR._32_BIT),
NBYTES_MLNO=4,
SADDR=int(toggle_pin_addr),
SOFF=0,
SLAST=0,
DADDR=int(GPIOC_PSOR),
DOFF=0,
# DLASTSGA=0,
# CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=False))
# proxy.update_dma_TCD(0, tcd0_msg)
DLASTSGA=int(tcd_addrs[1]),
CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=True))
# # Convert Protocol Buffer encoded TCD to bytes structure.
tcd0 = proxy.tcd_msg_to_struct(tcd0_msg)
# Create binary TCD struct for each TCD protobuf message and copy to device
# memory.
for i in xrange(2):
tcd_i = tcd0.copy()
tcd_i['DADDR'] = [GPIOC_PSOR, GPIOC_PCOR][i]
tcd_i['DLASTSGA'] = tcd_addrs[(i + 1) % len(tcd_addrs)]
tcd_i['CSR'] |= (1 << 4)
proxy.mem_cpy_host_to_device(tcd_addrs[i], tcd_i.tostring())
# Load initial TCD in scatter chain to DMA channel chosen to handle scattering.
proxy.mem_cpy_host_to_device(hw_tcds_addr, tcd0.tostring())
proxy.update_dma_registers(DMA.Registers(SSRT=0))
dma_channel_scatter = 0
dma_channel_i = 1
dma_channel_ii = 2
# Set ADC parameters
proxy.setAveraging(16, teensy.ADC_0)
proxy.setResolution(16, teensy.ADC_0)
proxy.setConversionSpeed(teensy.ADC_MED_SPEED, teensy.ADC_0)
proxy.setSamplingSpeed(teensy.ADC_MED_SPEED, teensy.ADC_0)
proxy.update_adc_registers(
teensy.ADC_0,
ADC.Registers(CFG2=ADC.R_CFG2(MUXSEL=ADC.R_CFG2.B)))
DMAMUX_SOURCE_ADC0 = 40 # from `kinetis.h`
DMAMUX_SOURCE_ADC1 = 41 # from `kinetis.h`
# DMAMUX0_CFGi[SOURCE] = DMAMUX_SOURCE_ADC0 // Route ADC0 as DMA channel source.
# DMAMUX0_CFGi[TRIG] = 0 // Disable periodic trigger.
# DMAMUX0_CFGi[ENBL] = 1 // Enable the DMAMUX configuration for channel.
proxy.update_dma_mux_chcfg(dma_channel_ii,
DMA.MUX_CHCFG(SOURCE=DMAMUX_SOURCE_ADC0,
TRIG=False,
ENBL=True))
# DMA request input signals and this enable request flag
# must be asserted before a channel’s hardware service
# request is accepted (21.3.3/394).
# DMA_SERQ = i
proxy.update_dma_registers(DMA.Registers(SERQ=dma_channel_ii))
proxy.enableDMA(teensy.ADC_0)
proxy.DMA_registers().loc['']
dmamux = DMA.MUX_CHCFG.FromString(proxy.read_dma_mux_chcfg(dma_channel_ii).tostring())
resolve_field_values(dmamux)[['full_name', 'value']]
adc0 = ADC.Registers.FromString(proxy.read_adc_registers(teensy.ADC_0).tostring())
resolve_field_values(adc0)[['full_name', 'value']].loc[['CFG2', 'SC1A', 'SC3']]
import re
import numpy as np
import pandas as pd
import arduino_helpers.hardware.teensy.adc as adc
# The number of samples to record for each ADC channel.
sample_count = 10
teensy_analog_channels = ['A0', 'A1', 'A0', 'A3', 'A0']
sc1a_pins = pd.Series(dict([(v, adc.CHANNEL_TO_SC1A_ADC0[getattr(teensy, v)])
for v in dir(teensy) if re.search(r'^A\d+', v)]))
channel_sc1as = np.array(sc1a_pins[teensy_analog_channels].tolist(), dtype='uint32')
proxy.free_all()
N = np.dtype('uint16').itemsize * channel_sc1as.size
# Allocate source array
adc_result_addr = proxy.mem_alloc(N)
# Fill result array with zeros
proxy.mem_fill_uint8(adc_result_addr, 0, N)
# Copy channel SC1A configurations to device memory
adc_sda1s_addr = proxy.mem_aligned_alloc_and_set(4, channel_sc1as.view('uint8'))
# Allocate source array
samples_addr = proxy.mem_alloc(sample_count * N)
tcds_addr = proxy.mem_aligned_alloc(32, sample_count * 32)
hw_tcds_addr = 0x40009000
tcd_addrs = [tcds_addr + 32 * i for i in xrange(sample_count)]
hw_tcd_addrs = [hw_tcds_addr + 32 * i for i in xrange(sample_count)]
# Fill result array with zeros
proxy.mem_fill_uint8(samples_addr, 0, sample_count * N)
# Create Transfer Control Descriptor configuration for first chunk, encoded
# as a Protocol Buffer message.
tcd0_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ITER=1),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,
DSIZE=DMA.R_TCD_ATTR._16_BIT),
NBYTES_MLNO=channel_sc1as.size * 2,
SADDR=int(adc_result_addr),
SOFF=2,
SLAST=-channel_sc1as.size * 2,
DADDR=int(samples_addr),
DOFF=2 * sample_count,
DLASTSGA=int(tcd_addrs[1]),
CSR=DMA.R_TCD_CSR(START=0, DONE=False, ESG=True))
# Convert Protocol Buffer encoded TCD to bytes structure.
tcd0 = proxy.tcd_msg_to_struct(tcd0_msg)
# Create binary TCD struct for each TCD protobuf message and copy to device
# memory.
for i in xrange(sample_count):
tcd_i = tcd0.copy()
tcd_i['SADDR'] = adc_result_addr
tcd_i['DADDR'] = samples_addr + 2 * i
tcd_i['DLASTSGA'] = tcd_addrs[(i + 1) % len(tcd_addrs)]
tcd_i['CSR'] |= (1 << 4)
proxy.mem_cpy_host_to_device(tcd_addrs[i], tcd_i.tostring())
# Load initial TCD in scatter chain to DMA channel chosen to handle scattering.
proxy.mem_cpy_host_to_device(hw_tcd_addrs[dma_channel_scatter],
tcd0.tostring())
print 'ADC results:', proxy.mem_cpy_device_to_host(adc_result_addr, N).view('uint16')
print 'Analog pins:', proxy.mem_cpy_device_to_host(adc_sda1s_addr, len(channel_sc1as) *
channel_sc1as.dtype.itemsize).view('uint32')
ADC0_SC1A = 0x4003B000 # ADC status and control registers 1
sda1_tcd_msg = DMA.TCD(CITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ELINK=False, ITER=channel_sc1as.size),
BITER_ELINKNO=DMA.R_TCD_ITER_ELINKNO(ELINK=False, ITER=channel_sc1as.size),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._32_BIT,
DSIZE=DMA.R_TCD_ATTR._32_BIT),
NBYTES_MLNO=4,
SADDR=int(adc_sda1s_addr),
SOFF=4,
SLAST=-channel_sc1as.size * 4,
DADDR=int(ADC0_SC1A),
DOFF=0,
DLASTSGA=0,
CSR=DMA.R_TCD_CSR(START=0, DONE=False))
proxy.update_dma_TCD(dma_channel_i, sda1_tcd_msg)
ADC0_RA = 0x4003B010 # ADC data result register
ADC0_RB = 0x4003B014 # ADC data result register
tcd_msg = DMA.TCD(CITER_ELINKYES=DMA.R_TCD_ITER_ELINKYES(ELINK=True, LINKCH=1, ITER=channel_sc1as.size),
BITER_ELINKYES=DMA.R_TCD_ITER_ELINKYES(ELINK=True, LINKCH=1, ITER=channel_sc1as.size),
ATTR=DMA.R_TCD_ATTR(SSIZE=DMA.R_TCD_ATTR._16_BIT,
DSIZE=DMA.R_TCD_ATTR._16_BIT),
NBYTES_MLNO=2,
SADDR=ADC0_RA,
SOFF=0,
SLAST=0,
DADDR=int(adc_result_addr),
DOFF=2,
DLASTSGA=-channel_sc1as.size * 2,
CSR=DMA.R_TCD_CSR(START=0, DONE=False,
MAJORELINK=True,
MAJORLINKCH=dma_channel_scatter))
proxy.update_dma_TCD(dma_channel_ii, tcd_msg)
# Clear output array to zero.
proxy.mem_fill_uint8(adc_result_addr, 0, N)
proxy.mem_fill_uint8(samples_addr, 0, sample_count * N)
# Software trigger channel $i$ to copy *first* SC1A configuration, which
# starts ADC conversion for the first channel.
#
# Conversions for subsequent ADC channels are triggered through minor-loop
# linking from DMA channel $ii$ to DMA channel $i$ (*not* through explicit
# software trigger).
print 'ADC results:'
for i in xrange(sample_count):
proxy.update_dma_registers(DMA.Registers(SSRT=dma_channel_i))
# Display converted ADC values (one value per channel in `channel_sd1as` list).
print ' Iteration %s:' % i, proxy.mem_cpy_device_to_host(adc_result_addr, N).view('uint16')
print ''
print 'Samples by channel:'
# Trigger once per chunk
# for i in xrange(sample_count):
# proxy.update_dma_registers(DMA.Registers(SSRT=0))
device_dst_data = proxy.mem_cpy_device_to_host(samples_addr, sample_count * N)
pd.DataFrame(device_dst_data.view('uint16').reshape(-1, sample_count).T,
columns=teensy_analog_channels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Overview
Step2: Configure ADC sample rate, etc.
Step3: Pseudo-code to set DMA channel $i$ to be triggered by ADC0 conversion complete.
Step4: Analog channel list
Step5: Allocate and initialize device arrays
Step6: Configure DMA channel $i$
Step7: Configure DMA channel $ii$
Step8: Trigger sample scan across selected ADC channels
|
7,509
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
from scipy.linalg import inv
from numpy.linalg import norm
class LinearRegression:
def __init__(self):
super()
def fit(self, X, Y, alpha=0.95):
        ''' Fit the model. The assumed model is Y = X * theta + epsilon,
        where X is the regressor, Y is the response,
        and epsilon has a normal distribution with parameters (0, sigma^2 * I_n).
        alpha is the confidence level for the confidence interval.
        '''
        # Sample size and number of features
self.n, self.k = X.shape
        # Parameter estimates
self.theta = inv(X.T @ X) @ X.T @ Y
self.sigma_sq = norm(Y - X @ self.theta) ** 2 / (self.n - self.k)
        # Compute the confidence intervals
l_quant = sps.t.ppf((1 - alpha) / 2, df=self.n - self.k)
r_quant = sps.t.ppf((1 + alpha) / 2, df=self.n - self.k)
diag = inv(X.T @ X).diagonal()
coeff = np.sqrt(self.sigma_sq * diag)
self.conf_int = np.array([self.theta + l_quant * coeff, self.theta + r_quant * coeff]).T
return self
def summary(self):
print('Linear regression on %d features and %d examples' % (self.k, self.n))
print('Sigma: %.6f' % self.sigma_sq)
print('\t\tLower\t\tEstimation\tUpper')
for j in range(self.k):
print('theta_%d:\t%.6f\t%.6f\t%.6f' % (j, self.conf_int[j, 0],
self.theta[j], self.conf_int[j, 1]))
def predict(self, X):
        ''' Return the predicted response for new objects X. '''
Y_pred = X @ self.theta
return Y_pred
import csv
# I downloaded the file locally
data = np.array(list(csv.reader(open('ice_cream.txt', 'r'), delimiter='\t')))
source = data[1:, :].astype(float)
print(source[:5])
# Convert to Celsius
source[:, 4] = (source[:, 4] - 32) / 1.8
source[:, 5] = (source[:, 5] - 32) / 1.8
# Sample size and number of parameters
n, k = source.shape[0], 2
print(n)
# Response and regressor
Y = source[:, 1]
X = np.zeros((n, k))
X[:, 0] = np.ones(n)
X[:, 1] = source[:, 4]
print(X[:5])
# Fit the model
model = LinearRegression()
model.fit(X, Y)
# Print the summary
model.summary()
grid = np.linspace(-5, 25, 1000)
plt.figure(figsize=(20, 8))
plt.plot(grid, model.theta[0] + grid * model.theta[1], color='brown', label='Предсказание', linewidth=2.5)
plt.scatter(X[:, 1], Y, s=40.0, label='Выборка', color='red', alpha=0.5)
plt.legend()
plt.ylabel('Литров на человека')
plt.xlabel('Температура')
plt.title('Потребление мороженого')
plt.grid()
plt.show()
# Sample size and number of parameters
n, k = source.shape[0], 4
# Response and regressor
Y = source[:, 1]
X = np.zeros((n, k))
X[:, 0] = np.ones(n)
X[:, 1] = source[:, 4]
X[:, 2] = (source[:, 6] == 1).astype(int)
X[:, 3] = (source[:, 6] == 2).astype(int)
print(X[:5])
# Fit the model
model = LinearRegression()
model.fit(X, Y)
# Print the summary
model.summary()
grid = np.linspace(-5, 25, 1000)
y_0 = model.theta[0] + grid * model.theta[1]
y_1 = model.theta[0] + grid * model.theta[1] + model.theta[2]
y_2 = model.theta[0] + grid * model.theta[1] + model.theta[3]
plt.figure(figsize=(20, 8))
plt.plot(grid, y_0, color='gold', label='Предсказание (год 0)', linewidth=2.5)
plt.plot(grid, y_1, color='turquoise', label='Предсказание (год 1)', linewidth=2.5)
plt.plot(grid, y_2, color='springgreen', label='Предсказание (год 2)', linewidth=2.5)
plt.scatter(X[source[:, 6] == 0, 1], Y[source[:, 6] == 0], s=40.0, label='Выборка (год 0)', color='yellow', alpha=0.5)
plt.scatter(X[source[:, 6] == 1, 1], Y[source[:, 6] == 1], s=40.0, label='Выборка (год 1)', color='blue', alpha=0.5)
plt.scatter(X[source[:, 6] == 2, 1], Y[source[:, 6] == 2], s=40.0, label='Выборка (год 2)', color='green', alpha=0.5)
plt.legend()
plt.ylabel('Литров на человека')
plt.xlabel('Температура')
plt.title('Потребление мороженого')
plt.grid()
plt.show()
# Number of features
k = 2
# Sizes of the subsamples
n_0 = (source[:, 6] == 0).sum()
n_1 = (source[:, 6] == 1).sum()
n_2 = (source[:, 6] == 2).sum()
print(n_0, n_1, n_2)
# Three subsamples
source_0, source_1, source_2 = np.vsplit(source, [n_0, n_0 + n_1])
print(source_0.astype(int)[:3]) # Cast to int so the printed rows do not smear across the whole line
print(source_1.astype(int)[:3])
print(source_2.astype(int)[:3])
# Responses and regressors
Y_0 = source_0[:, 1]
X_0 = np.zeros((n_0, k))
X_0[:, 0] = np.ones(n_0)
X_0[:, 1] = source_0[:, 4]
print(X_0[:3])
Y_1 = source_1[:, 1]
X_1 = np.zeros((n_1, k))
X_1[:, 0] = np.ones(n_1)
X_1[:, 1] = source_1[:, 4]
print(X_1[:3])
Y_2 = source_2[:, 1]
X_2 = np.zeros((n_2, k))
X_2[:, 0] = np.ones(n_2)
X_2[:, 1] = source_2[:, 4]
print(X_2[:3])
# Fit the models
model_0 = LinearRegression()
model_0.fit(X_0, Y_0)
model_1 = LinearRegression()
model_1.fit(X_1, Y_1)
model_2 = LinearRegression()
model_2.fit(X_2, Y_2)
# Print the summaries
model_0.summary()
model_1.summary()
model_2.summary()
grid = np.linspace(-5, 25, 1000)
plt.figure(figsize=(20, 8))
plt.plot(grid, model_0.theta[0] + grid * model_0.theta[1], color='gold', label='Предсказание (год 0)', linewidth=2.5)
plt.plot(grid, model_1.theta[0] + grid * model_1.theta[1], color='turquoise', label='Предсказание (год 1)', linewidth=2.5)
plt.plot(grid, model_2.theta[0] + grid * model_2.theta[1], color='springgreen', label='Предсказание (год 2)', linewidth=2.5)
plt.scatter(X[source[:, 6] == 0, 1], Y[source[:, 6] == 0], s=40.0, label='Выборка (год 0)', color='yellow', alpha=0.5)
plt.scatter(X[source[:, 6] == 1, 1], Y[source[:, 6] == 1], s=40.0, label='Выборка (год 1)', color='blue', alpha=0.5)
plt.scatter(X[source[:, 6] == 2, 1], Y[source[:, 6] == 2], s=40.0, label='Выборка (год 2)', color='green', alpha=0.5)
plt.legend()
plt.ylabel('Литров на человека')
plt.xlabel('Температура')
plt.title('Потребление мороженого')
plt.grid()
plt.show()
# Sample size and number of parameters
n, k = source.shape[0], 8
print(n, k)
# Build the regressor
X = np.zeros((n, k))
X[:, 0] = np.ones(n)
X[:, 1] = source[:, 4] # Temperature
X[:, 2] = source[:, 0] # Date
X[:, 3:5] = source[:, 2:4] # Skipping IC
X[:, 5] = source[:, 5]
X[:, 6] = (source[:, 6] == 1).astype(int) # Indicator for year 1
X[:, 7] = (source[:, 6] == 2).astype(int) # Indicator for year 2
print(X.astype(int)[:5])
# Response
Y = source[:, 1]
# Fit the model
model = LinearRegression()
model.fit(X, Y)
# Print the summary
model.summary()
# Sample size and number of parameters
n, k = source.shape[0], 4
print(n, k)
# Response and regressor
Y = source[:, 1]
X = np.zeros((n, k))
X[:, 0] = np.ones(n)
X[:, 1] = source[:, 4]
X[:, 2] = X[:, 1] ** 2
X[:, 3] = X[:, 1] ** 3
print(X[:5])
# Fit the model
model = LinearRegression()
model.fit(X, Y)
# Print the summary
model.summary()
grid = np.linspace(-5, 25, 1000)
y = model.theta[0] + model.theta[1] * grid + model.theta[2] * grid ** 2 + model.theta[3] * grid ** 3
plt.figure(figsize=(20, 8))
plt.plot(grid, y, color='brown', label='Предсказание (год 0)', linewidth=2.5)
plt.scatter(X[:, 1], Y, s=40.0, label='Выборка', color='red', alpha=0.5)
plt.legend()
plt.ylabel('Литров на человека (первый случай)')
plt.xlabel('Температура')
plt.title('Потребление мороженого')
plt.grid()
plt.show()
D = inv(X.T @ X)
print(D)
from scipy.linalg import eigvals
vals = eigvals(D)
print(vals) # No complex values, so nothing went wrong
vals = vals.real.astype(float)
print(vals)
CI = np.sqrt(vals.max() / vals.min())
print(CI)
from sklearn import linear_model
from sklearn import cross_validation
from sklearn.metrics import mean_squared_error
def best_features(X_train, X_test, Y_train, Y_test):
mses = [] # сюда записывайте значения MSE
k = X_train.shape[1]
for j in range(1, 2 ** k): # номер набора признаков
mask = np.array([j & (1 << s) for s in range(k)], dtype=bool)
features_numbers = np.arange(k)[mask] # набор признаков
model = linear_model.LinearRegression()
model.fit(X_train[:, features_numbers], Y_train)
mse = mean_squared_error(Y_test, model.predict(X_test[:, features_numbers])) # MSE для данного набора признаков
mses.append(mse)
    # Print the 10 best feature subsets
print('mse\t features')
mses = np.array(mses)
best_numbres = np.argsort(mses)[:10]
for j in best_numbres:
mask = np.array([j & (1 << s) for s in range(k)], dtype=bool)
features_numbers = np.arange(k)[mask]
print('%.3f\t' % mses[j], features_numbers)
# I downloaded the file locally and replaced the spaces with tabs (there were extra spaces in some places)
data = np.array(list(csv.reader(open('yh.data', 'r'), delimiter='\t')))
yacht = data.astype(float)
print(yacht)
Y = yacht[:, 6]
X = yacht[:, :6]
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.3)
best_features(X_train, X_test, Y_train, Y_test)
# I downloaded the file locally and removed the first line with a bunch of commas
data = np.array(list(csv.reader(open('bhp.csv', 'r'))))
houses = data[1:, :].astype(float)
print(houses)
# Number of columns
cols = houses.shape[1]
Y = houses[:, 9] # The TAX column
X = houses[:, np.delete(np.arange(cols), 9)]
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.3)
best_features(X_train, X_test, Y_train, Y_test)
def find_conf_reg(sample):
size = sample.size
alpha_r = np.sqrt(0.95)
u_1 = sps.t.ppf((1 - alpha_r) / 2, df=size - 1)
u_2 = sps.t.ppf((1 + alpha_r) / 2, df=size - 1)
v_1 = sps.chi2.ppf((1 - alpha_r) / 2, df=size - 1)
v_2 = sps.chi2.ppf((1 + alpha_r) / 2, df=size - 1)
mean = sample.mean()
std = sample.std()
a_low = mean - u_2 * (std / size) ** 0.5
a_high = mean - u_1 * (std / size) ** 0.5
s_low = (size - 1) * std / v_2
s_hight = (size - 1) * std / v_1
return ((a_low, a_high), (s_low, s_hight))
plt.figure(figsize=(20, 8))
for size in [5, 20, 50]:
a_conf, s_conf = find_conf_reg(sps.norm.rvs(size=size))
plt.hlines(s_conf[0], a_conf[0], a_conf[1], linewidth=2.5, color='tomato')
plt.hlines(s_conf[1], a_conf[0], a_conf[1], linewidth=2.5, color='tomato')
plt.vlines(a_conf[0], s_conf[0], s_conf[1], linewidth=2.5, color='tomato')
plt.vlines(a_conf[1], s_conf[0], s_conf[1], linewidth=2.5, color='tomato')
plt.ylabel('Выборочная дисперсия')
plt.xlabel('Выборочное среднее')
plt.title('Доверительная область')
plt.grid()
plt.show()
alpha = 0.05 # Significance level
# Compute the statistics for samples of four different sizes
stats = np.zeros((4, 4))
for i, size in enumerate([5, 15, 30, 50]):
t = sps.bernoulli(p=0.5).rvs(size=size).sum() # Сумма бернуллиевских случайных величин
pvalue = sps.binom(n=size, p=0.5).sf(t) # Статистика T имеет биномиальное распределение
c_alpha = sps.binom(n=size, p=0.5).ppf(1 - alpha)
stats[i, :] = np.array([size, t, c_alpha, pvalue])
pd.DataFrame(data=stats, columns=['$n$', '$t$', '$c_{\\alpha}$', 'p-value'])
# In both cases build a small table over 10 samples
for size, p in [(5, 0.75), (100000, 0.51)]:
stats = np.zeros((10, 5))
for i in np.arange(10):
t = sps.bernoulli(p=p).rvs(size=size).sum()
pvalue = sps.binom(n=size, p=0.5).sf(t)
c_alpha = sps.binom(n=size, p=0.5).ppf(1 - alpha)
rejected = int(t > c_alpha)
stats[i, :] = np.array([size, p, t, pvalue, rejected])
print(pd.DataFrame(data=stats, columns=['size', 'prob', 'stat', 'p-value', 'rej']))
p_conf = 0.75 # If it is larger, we are confident that the failures really are connected with the black cat.
beta = 0.8 # Magic constant from the seminar and from the problem statement
grid = np.linspace(0.501, 0.999, 500)
plt.figure(figsize=(20, 8))
for size in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]:
power = sps.binom(n=size, p=grid).sf(sps.binom(n=size, p=0.5).ppf(1 - alpha))
plt.plot(grid, power, label='n={}'.format(size))
plt.legend()
plt.vlines(p_conf, 0, 1)
plt.hlines(beta, 0.5, 1)
plt.title('Мощность критериев')
plt.ylabel('$\\beta(\\theta, X)$')
plt.xlabel('$\\theta$')
plt.grid()
plt.show()
size_conf = 27 # Optimal sample size
# In both cases build a small table over 10 samples
for p in [0.6, 0.85]:
stats = np.zeros((10, 6))
for i in np.arange(10):
t = sps.bernoulli(p=p).rvs(size=size_conf).sum()
pvalue = sps.binom(n=size_conf, p=0.5).sf(t)
c_alpha = sps.binom(n=size_conf, p=0.5).ppf(1 - alpha)
rejected = int(t > c_alpha)
stats[i, :] = np.array([size_conf, p, t, c_alpha, pvalue, rejected])
print(pd.DataFrame(data=stats, columns=['size', 'prob', 'stat', 'c_alpha', 'p-value', 'rej']))
alpha = 0.05 # Significance level
sample = [3.4, 0.5, 0.2, 1, 1.7, 1, 1, 3.9, 4.1, 3.6, 0.5, 0.7,
1.2, 0.5, 0.5, 1.9, 0.5, 0.3, 1.5, 1.9, 1.9, 2.4, 1.2,
2.9,3.2, 1.2, 1.7, 2.9, 1.5, 2.4, 3.4, 0.7, 1.2, 1.7,
1.5, 3.2, 3.9, 1.7, 2.7, 1, 1.5, 1.5, 2.9, 0.7, 2.2, 2.2,
1.9, 1.7, 1.7, 1.9, 1.9, 3.9, 1.2, 1.5, 2.4, 3.3, 2.9,
2.2, 4.6, 3.9, 2.2, 1.2, 3.6, 3.2, 2.2, 2.9, 3.4, 2.4,
2.9, 3.2, 1.7, 1.7, 2.2, 2.7, 3.2, 3.2, 2.9, 1.9, 1.7,
2.2, 1.7, 1.2, 1.2, 1.9, 0.7, 2.2, 1.5, 1.5, 2.7, 4.9,
3.2, 0.7, 2.2, 3.6, 3.6, 1.7, 3.2, 3.4, 1, 0.5, 3.4, 5.3,
4.4, 6.8, 4.6, 3.4, 2.2, 2.2, 2.7, 2.2, 1.2, 1.7, 1.9,
          1.2, 1.2, 3.6, 2.4, 1, 2.9, 3.6, 1.7, 2.8] # Sample from the second practicum
k = 2.00307 # Parameters from the second practicum
l = 2.53379
sps.kstest(sample, sps.weibull_min(c=k, scale=l).cdf)
# Code from problem 3a of practicum 2
sample = sps.norm.rvs(size=(200, 300))
estimator = sample.cumsum(axis=1) / (np.ones(200).reshape(200, 1) @ np.linspace(1, 300, 300).reshape(1, 300))
stat = estimator * np.linspace(1, 300, 300) ** 0.5
sample = stat[:, -1]
# Check with the Kolmogorov test
sps.kstest(sample, 'norm')
# Code from problem 3b of practicum 2
sample = sps.poisson.rvs(mu=1, size=(200, 300))
estimator = sample.cumsum(axis=1) / (np.ones(200).reshape(200, 1) @ np.linspace(1, 300, 300).reshape(1, 300))
stat = (estimator - 1) * np.linspace(1, 300, 300) ** 0.5
sample = stat[:, -1]
# Check with the Kolmogorov test
sps.kstest(sample, 'norm')
# Code from problem 4 of practicum 2
sample = sps.uniform.rvs(size=(200, 300))
estimator = np.maximum.accumulate(sample, axis=1)
stat = (1 - estimator) * np.linspace(1, 300, 300)
sample = stat[:, -1]
# Check with the Kolmogorov test
sps.kstest(sample, 'expon')
# I downloaded the file locally
data = np.array(list(csv.reader(open('ice_cream.txt', 'r'), delimiter='\t')))
source = data[1:, :].astype(float)
# Convert to Celsius
source[:, 4] = (source[:, 4] - 32) / 1.8
source[:, 5] = (source[:, 5] - 32) / 1.8
# Sample size and number of parameters
n, k = source.shape[0], 2
# Response and regressor
Y = source[:, 1]
X = np.zeros((n, k))
X[:, 0] = np.ones(n)
X[:, 1] = source[:, 4]
# Fit the model
model = LinearRegression()
model.fit(X, Y)
# Get the residuals
errors = Y - model.predict(X)
# Check with the Kolmogorov test.
sps.kstest(errors, sps.norm(scale=model.sigma_sq ** 0.5).cdf)
from statsmodels.distributions.empirical_distribution import ECDF
source = np.array(list(csv.reader(open('friends.csv', 'r'))))
sample = np.sort(source[:, 1].astype(int))
grid = np.linspace(0, sample.max() + 1, 1000)
sps.kstest(sample, sps.rayleigh(scale=180).cdf)
plt.figure(figsize=(20, 5))
plt.plot(grid, ECDF(sample)(grid), color='red', label='ЭФР')
plt.plot(grid, sps.rayleigh(scale=180).cdf(grid), color='blue', label='ФР')
plt.legend()
plt.title('Число друзей моих друзей в ВК')
plt.grid()
plt.show()
plt.figure(figsize=(20, 5))
plt.plot(grid, sps.rayleigh(scale=180).pdf(grid), color='blue', label='Плотность')
plt.legend()
plt.title('Плотность распределения Рэлея')
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Linear regression
Step2: Load the data on ice cream consumption as a function of air temperature and price (file ice_cream.txt).
Step3: Conclusion. Indeed, the plot is the same (up to the temperature conversion). Linear regression approximates the dependence of ice cream consumption on temperature reasonably well, but not perfectly. For a more precise conclusion it is worth computing a confidence interval.
Step4: Conclusion. Broken down by year it is also clear that a linear dependence approximates the real one reasonably well. The year number cannot be taken as a feature, since it is more of a label for a class of values, whereas linear regression works with values on which the dependence is linear and numerical.
Step5: Conclusion. After splitting the sample into three parts the results became somewhat better. Moreover, such a split makes it possible to notice that as the year increases more ice cream is consumed overall, and that the gain in liters of ice cream per degree grows with the year.
Step6: Conclusion. Apparently not all features should be included. Some coefficients are very small in absolute value compared to the values typical for the corresponding features, so it is better not to include them. Examples are the measurement number (theta_2) and the temperature in the following month (theta_5).
Step7: Conclusion. The results look more natural, but now it seems that at 30-40 degrees people consume an incredible amount of ice cream, which is most likely not true (there is some limit after all). Moreover, the values of the last two parameters are very small, which indicates that these parameters have little influence at low temperatures. Perhaps the linear model is already good enough. In our case, however, it is visible that at negative temperatures ice cream consumption drops rapidly. There is probably some truth to that.
Step8: Conclusion. As discussed in the seminar, a high condition index (greater than 30) indicates multicollinearity, which leads to overfitting. Apparently we overdid it.
Step9: Apply the implemented feature selection to the datasets
Step10: Conclusion. Feature 5 (Froude number) is the most useful feature; the rest appear somewhat at random (except for feature 4, the length-beam ratio, which almost never appears).
Step11: Conclusion. The first 3 and the last 3 features are the most useful.
Step12: Conclusion. As the sample size grows, the area of the confidence region (which is obviously measurable, even in the Jordan sense) shrinks considerably. This means that the more observations we have, the more precisely the parameter can be estimated at a given confidence level.
Step13: Conclusion. If $t/n$ is much larger than 0.5 (say, 0.67), the p-value comes out below 0.05 and the hypothesis is rejected. The test works (it is restated compactly after this step list).
Step14: Conclusion. It turns out that on small samples we almost never reject the hypothesis even for large $p$, while on large samples the hypothesis is rejected even for $p$ close to 0.5. These look like type II and type I errors, respectively.
Step15: Conclusion. The sample size that is optimal in a certain sense turns out to be about 25-30, at the intersection of $\theta=p^*=0.75$ and $\beta(\theta, X)=\beta=0.8$. We choose 27.
Step16: Conclusion. With the chosen value of $p^*$ and the selected optimal sample size, the hypothesis is almost never rejected when $p < p^*$, and is almost always rejected when $p > p^*$.
Step17: Conclusion. The sample is consistent with the Weibull distribution (since the p-value is greater than 0.05, the hypothesis is not rejected).
Step18: Conclusion. The sample is consistent with the standard normal distribution (since the p-value is greater than 0.05, the hypothesis is not rejected).
Step19: Conclusion. The sample is consistent with the standard normal distribution (since the p-value is greater than 0.05, the hypothesis is not rejected).
Step20: Conclusion. The sample is consistent with the exponential distribution with parameter 1 (since the p-value is greater than 0.05, the hypothesis is not rejected).
Step21: Conclusion. The distribution is consistent with the normal distribution.
Step22: In the first practicum I said that I could not draw a conclusion about the distribution from such a plot. Maybe now it will work out. Let us put forward the hypothesis that the number of friends of my friends follows a Rayleigh distribution with scale parameter 180. Why not — we are entitled to.
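Restating the binomial test from Steps 13-16 in formulas (every quantity corresponds directly to a line in the code above: the statistic t, the critical value c_alpha and the power grid): under $H_0: p = 0.5$ the statistic $T = \sum_{i=1}^{n} X_i$ has the $\mathrm{Bin}(n, 1/2)$ distribution; the hypothesis is rejected when $T > c_{\alpha}$, where $c_{\alpha}$ is the $(1-\alpha)$-quantile of $\mathrm{Bin}(n, 1/2)$ obtained from ppf; and the power at a true success probability $p$ is $\beta(p) = \mathrm{P}_{T \sim \mathrm{Bin}(n, p)}(T > c_{\alpha})$, which is exactly what the sf call evaluated on the grid of $p$ values computes.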
|
7,510
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: the original notebook relied on `%pylab inline`-style implicit imports;
# the imports below are assumed equivalents so this cell stands on its own
# (the exact source modules for `scale.mad` and `CircularAperture` are assumptions).
from glob import glob
from multiprocessing import cpu_count
from os import environ, path, mkdir
from time import time
import numpy as np
from numpy import median, std, isnan, sqrt, linspace, array
from matplotlib import rcParams
from matplotlib.pyplot import figure
from astropy.io import fits
from photutils import CircularAperture
from statsmodels.robust import scale
from tqdm import tqdm_notebook
from wanderer import wanderer
startFull = time()  # referenced by the elapsed-time printouts further down
def clipOutlier2D(arr2D, nSig=10):
arr2D = arr2D.copy()
medArr2D = median(arr2D,axis=0)
sclArr2D = np.sqrt(((scale.mad(arr2D)**2.).sum()))
outliers = abs(arr2D - medArr2D) > nSig*sclArr2D
inliers = abs(arr2D - medArr2D) <= nSig*sclArr2D
arr2D[outliers] = median(arr2D[inliers],axis=0)
return arr2D
rcParams['figure.dpi'] = 150
rcParams['image.interpolation'] = 'None'
rcParams['image.cmap'] = 'Blues_r'
rcParams['axes.grid'] = False
from os import environ
planetName = 'GJ1214'
planetDirectory = '/Research/Planets/'
channel = 'ch2'
# channel = 'ch2/'
dataSub = 'bcd/'
dataDir = environ['HOME'] + planetDirectory + planetName + '/data/raw/' + channel + '/big/'
AORs = []
for dirNow in glob(dataDir + '/*'):
AORs.append(dirNow.split('/')[-1])
fileExt = '*bcd.fits'
uncsExt = '*bunc.fits'
print(dataDir)
len(AORs)
iAOR = 0
AORNow = AORs[iAOR]
loadfitsdir = dataDir + AORNow + '/' + channel + '/' + dataSub
print(loadfitsdir)
nCores = cpu_count()
fitsFilenames = glob(loadfitsdir + fileExt);print(len(fitsFilenames))
uncsFilenames = glob(loadfitsdir + uncsExt);print(len(uncsFilenames))
header_test = fits.getheader(fitsFilenames[0])
print('AORLABEL:\t{}\nNum Fits Files:\t{}\nNum Unc Files:\t{}'.format\
(header_test['AORLABEL'], len(fitsFilenames), len(uncsFilenames)))
ppm = 1e6
y,x = 0,1
yguess, xguess = 15., 15. # Specific to Spitzer circa 2010 and beyond
filetype = 'bcd.fits' # Specific to Spitzer Basic Calibrated Data
method = 'median'
print('Initialize an instance of `wanderer` as `example_wanderer_median`\n')
example_wanderer_median = wanderer(fitsFileDir=loadfitsdir, filetype=filetype, telescope='Spitzer',
yguess=yguess, xguess=xguess, method=method, nCores=nCores)
example_wanderer_median.AOR = AORNow
example_wanderer_median.planetName = planetName
example_wanderer_median.channel = channel
print('Load Data From Fits Files in ' + loadfitsdir + '\n')
# example_wanderer_median.load_data_from_fits_files()
example_wanderer_median.spitzer_load_fits_file(outputUnits='electrons')#(outputUnits='muJ_per_Pixel')
print('Skipping Load Data From Save Files in ' + loadfitsdir + '\n')
# example_wanderer_median.load_data_from_save_files(savefiledir='./SaveFiles/', \
# saveFileNameHeader='Example_Wanderer_Median_', saveFileType='.pickle.save')
example_wanderer_median.imageCube[np.where(isnan(example_wanderer_median.imageCube))] = \
np.nanmedian(example_wanderer_median.imageCube)
print('Find, flag, and NaN the "Bad Pixels" Outliers' + '\n')
example_wanderer_median.find_bad_pixels()
print('Fit for All Centers: Flux Weighted, Gaussian Fitting, Gaussian Moments, Least Asymmetry' + '\n')
# example_wanderer_median.fit_gaussian_centering()
example_wanderer_median.fit_flux_weighted_centering()
# example_wanderer_median.fit_least_asymmetry_centering()
# example_wanderer_median.fit_all_centering() # calling this calls least_asymmetry, which does not work :(
# start = time()
# example_wanderer_median.mp_fit_gaussian_centering(subArray=6)
# print('Operation took {} seconds with {} cores'.format(time()-start, example_wanderer_median.nCores))
# centering_df_copy = example_wanderer_median.centering_df
# example_wanderer_median.centering_df = {}
start = time()
example_wanderer_median.mp_lmfit_gaussian_centering(subArraySize=6, recheckMethod=None, median_crop=False)
print('Operation took {} seconds with {} cores'.format(time()-start, example_wanderer_median.nCores))
nSig = 10.1
medY = median(example_wanderer_median.centering_GaussianFit.T[y])
medX = median(example_wanderer_median.centering_GaussianFit.T[x])
stdY = std(example_wanderer_median.centering_GaussianFit.T[y])
stdX = std(example_wanderer_median.centering_GaussianFit.T[x])
ySig = 4
xSig = 4
outliers = (((example_wanderer_median.centering_GaussianFit.T[y] - medY)/(ySig*stdY))**2 + \
((example_wanderer_median.centering_GaussianFit.T[x] - medX)/(xSig*stdX))**2) > 1
ax = figure().add_subplot(111)
cx, cy = example_wanderer_median.centering_GaussianFit.T[x],example_wanderer_median.centering_GaussianFit.T[y]
ax.plot(cx,cy,'.',ms=1)
ax.plot(cx[outliers],cy[outliers],'.',ms=1)
# ax.plot(median(cx), median(cy),'ro',ms=1)
ax.set_xlim(medX-nSig*stdX,medX+nSig*stdX)
ax.set_ylim(medY-nSig*stdY,medY+nSig*stdY)
from sklearn.cluster import DBSCAN
dbs = DBSCAN(n_jobs=-1, eps=0.2, leaf_size=10)
dbsPred = dbs.fit_predict(example_wanderer_median.centering_GaussianFit)
dbs_options = [k for k in range(-1,100) if (dbsPred==k).sum()]
dbs_options
fig = figure(figsize=(6,6))
ax = fig.add_subplot(111)
medGaussCenters = median(example_wanderer_median.centering_GaussianFit,axis=0)
sclGaussCenters = scale.mad(example_wanderer_median.centering_GaussianFit)
sclGaussCenterAvg = np.sqrt(((sclGaussCenters**2.).sum()))
yctrs = example_wanderer_median.centering_GaussianFit.T[y]
xctrs = example_wanderer_median.centering_GaussianFit.T[x]
nSigmas = 5
for nSig in linspace(1,10,10):
CircularAperture(medGaussCenters[::-1],nSig*sclGaussCenterAvg).plot(ax=ax)
for dbsOpt in dbs_options:
ax.plot(xctrs[dbsPred==dbsOpt], yctrs[dbsPred==dbsOpt],'.',zorder=0, ms=1)
npix = 3
stillOutliers = np.where(abs(example_wanderer_median.centering_GaussianFit - medGaussCenters) > 4*sclGaussCenterAvg)[0]
print(len(stillOutliers))
# for o in stillOutliers:
# figure()
# imshow(example_wanderer_median.imageCube[o][16-npix:16+npix+1,16-npix:16+npix+1])
dbsClean = 0
dbsKeep = (dbsPred == dbsClean)
nCores = example_wanderer_median.nCores
start = time()
example_wanderer_median.mp_measure_background_circle_masked()
print('CircleBG took {} seconds with {} cores'.format(time() - start, nCores))
start = time()
example_wanderer_median.mp_measure_background_annular_mask()
print('AnnularBG took {} seconds with {} cores'.format(time() - start, nCores))
start = time()
example_wanderer_median.mp_measure_background_KDE_Mode()
print('KDEUnivBG took {} seconds with {} cores'.format(time() - start, nCores))
start = time()
example_wanderer_median.mp_measure_background_median_masked()
print('MedianBG took {} seconds with {} cores'.format(time() - start, nCores))
fig = figure(figsize=(20,10))
ax = fig.add_subplot(111)
ax.plot(example_wanderer_median.timeCube, example_wanderer_median.background_CircleMask,'.',alpha=0.2)
ax.plot(example_wanderer_median.timeCube, example_wanderer_median.background_Annulus,'.',alpha=0.2)
ax.plot(example_wanderer_median.timeCube, example_wanderer_median.background_MedianMask,'.',alpha=0.2)
ax.plot(example_wanderer_median.timeCube, example_wanderer_median.background_KDEUniv,'.',alpha=0.2)
ax.axvline(example_wanderer_median.timeCube.min()-.01+0.02)
ax.set_ylim(-25,100)
# ax.set_xlim(example_wanderer_median.timeCube.min()-.01,example_wanderer_median.timeCube.min() + .05)
example_wanderer_median.measure_effective_width()
print(example_wanderer_median.effective_widths.mean(), sqrt(example_wanderer_median.effective_widths).mean())
vrad_dist = example_wanderer_median.quadrature_widths - np.median(example_wanderer_median.quadrature_widths)
vrad_dist = clipOutlier2D(vrad_dist, nSig=5)
ax = figure().add_subplot(111)
ax.hist(vrad_dist, bins=example_wanderer_median.nFrames//100);
ax.hist(0.75*vrad_dist, bins=example_wanderer_median.nFrames//100);
ax.hist(0.5*vrad_dist, bins=example_wanderer_median.nFrames//100);
ax.hist(0.25*vrad_dist, bins=example_wanderer_median.nFrames//100);
# ax.set_xlim(-.25,.25);
print('Pipeline took {} seconds thus far'.format(time() - startFull))
print('Iterating over Background Techniques, Centering Techniques, Aperture Radii' + '\n')
centering_choices = ['Gaussian_Fit']#, 'Gaussian_Mom', 'FluxWeighted']#, 'LeastAsymmetry']
background_choices = ['AnnularMask']#example_wanderer_median.background_df.columns
staticRads = np.arange(1, 6,0.5)#[1.0 ]# aperRads = np.arange(1, 6,0.5)
varRads = [0.0, 0.25, 0.50, 0.75, 1.0, 1.25, 1.50]#[None]#
vrad_dist = example_wanderer_median.quadrature_widths - np.median(example_wanderer_median.quadrature_widths)
vrad_dist = clipOutlier2D(vrad_dist, nSig=5)
for staticRad in tqdm_notebook(staticRads, total=len(staticRads), desc='Static'):
for varRad in tqdm_notebook(varRads, total=len(varRads), desc='Variable'):
startMPFlux = time()
example_wanderer_median.mp_compute_flux_over_time_varRad(staticRad, varRad, centering_choices[0], \
background_choices[0], useTheForce=True)
# print('Flux Measurements took {} seconds for sRad {} and vRad {}'.format(time()-startMPFlux,staticRad,varRad))
example_wanderer_median.mp_compute_flux_over_time_betaRad()
print('Entire Pipeline took {} seconds'.format(time() - startFull))
example_wanderer_median.mp_DBScan_Flux_All()
inlier_master = array(list(example_wanderer_median.inliers_Phots.values())).mean(axis=0) == 1.0
((~inlier_master).sum() / inlier_master.size)*100
example_wanderer_median.extract_PLD_components()
example_wanderer_median.mp_DBScan_PLD_All()
print('Saving `example_wanderer_median` to a set of pickles for various Image Cubes and the Storage Dictionary')
savefiledir = environ['HOME']+'/Research/Planets/'+planetName+'/ExtracedData/' + channel
saveFileNameHeader = planetName+'_'+ AORNow +'_Median'
saveFileType = '.joblib.save'
if not path.exists(environ['HOME']+'/Research/Planets/'+planetName+'/ExtracedData/'):
mkdir(environ['HOME']+'/Research/Planets/'+planetName+'/ExtracedData/')
if not path.exists(savefiledir):
print('Creating ' + savefiledir)
mkdir(savefiledir)
print()
print('Saving to ' + savefiledir + saveFileNameHeader + saveFileType)
print()
example_wanderer_median.save_data_to_save_files(savefiledir=savefiledir, \
saveFileNameHeader=saveFileNameHeader, \
saveFileType=saveFileType)
quad_width= example_wanderer_median.quadrature_widths.values
vrad_dist = quad_width - np.median(quad_width)
vrad_dist = clipOutlier2D(vrad_dist, nSig=5)
vrad_dist_med = np.median(vrad_dist)
color_cycle = rcParams['axes.prop_cycle'].by_key()['color']
ax = figure().add_subplot(111)
for key in example_wanderer_median.flux_TSO_df.keys():
staticRad = float(key.split('_')[-2])
varRad = float(key.split('_')[-1])
aperRad = staticRad + varRad*vrad_dist_med
colorNow = color_cycle[int(varRad*4)]
# if aperRad > 1.5 and aperRad < 3.5:
ax.scatter(aperRad, scale.mad(np.diff(example_wanderer_median.flux_TSO_df[key])), \
color=colorNow, zorder=int(varRad*4))
for varRad in [0.,0.25, 0.5, 0.75, 1.0, 1.25, 1.5]:
colorNow = color_cycle[int(varRad*4)]
ax.scatter([],[], color=colorNow, label=varRad)
ax.set_xlabel('StaticRad + Average(varRad)')
ax.set_ylabel('MAD( Diff ( Flux ) )')
ax.legend(loc=0)
# ax.set_ylim(292.5,294)
# ax.set_xlim(2.9, 3.1)
print('Entire Pipeline took {} seconds'.format(time() - startFull))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As an example, Spitzer data is expected to be stored in the directory structure sketched after this step list
Step2: Load Wanderer Class
Step3: Load Stored Instance from Save Files
Step4: Double check for NaNs
Step5: Identify Strong Outliers
Step6: TEST
Step7: Create Beta Variable Radius
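A sketch of the on-disk layout implied by the path construction in the code above (the planet name GJ1214, channel ch2 and the AOR label are simply the values this notebook uses; the layout is inferred from the glob patterns and save paths, not from separate documentation):

$HOME/Research/Planets/GJ1214/data/raw/ch2/big/<AORLABEL>/ch2/bcd/*bcd.fits   # raw frames
$HOME/Research/Planets/GJ1214/data/raw/ch2/big/<AORLABEL>/ch2/bcd/*bunc.fits  # uncertainty frames
$HOME/Research/Planets/GJ1214/ExtracedData/ch2/                               # joblib save files written at the end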
|
7,511
|
<ASSISTANT_TASK:>
Python Code:
%pylab notebook
r1 = 0.641 # Stator resistance
x1 = 1.106 # Stator reactance
r2 = 0.332 # Rotor resistance
x2 = 0.464 # Rotor reactance
xm = 26.3 # Magnetization branch reactance
v_phase = 460 / sqrt(3) # Phase voltage
n_sync = 1800 # Synchronous speed (r/min)
w_sync = n_sync * 2*pi/60 # Synchronous speed (rad/s)
v_th = v_phase * ( xm / sqrt(r1**2 + (x1 + xm)**2) )
z_th = ((1j*xm) * (r1 + 1j*x1)) / (r1 + 1j*(x1 + xm))
r_th = real(z_th)
x_th = imag(z_th)
s = linspace(0, 1, 50) # Slip
s[0] = 0.001 # avoid divide-by-zero problems
nm = (1 - s) * n_sync # mechanical speed
t_ind1 = ((3 * v_th**2 * r2/s) /
(w_sync * ((r_th + r2/s)**2 + (x_th + x2)**2)))
t_ind2 = ((3 * v_th**2 * 2*r2/s) /
(w_sync * ((r_th + 2*r2/s)**2 + (x_th + x2)**2)))
rc('text', usetex=True) # enable LaTeX commands for plot
plot(nm, t_ind2,'k--',
nm, t_ind1,'b',
lw=2)
xlabel(r'$\mathbf{n_{m}}\ [rpm]$')
ylabel(r'$\mathbf{\tau_{ind}}\ [Nm]$')
title ('Induction motor torque-speed characteristic')
legend ((r'Doubled $R_{2}$','Original $R_{2}$'), loc = 3);
grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, initialize the values needed in this program.
Step2: Calculate the Thevenin voltage and impedance from Equations 7-41a
Step3: Now calculate the torque-speed characteristic for many slips between 0 and 1.
Step4: Calculate torque for original rotor resistance using the Thevenin-equivalent torque expression (restated after this step list)
Step5: Calculate torque for doubled rotor resistance
Step6: Plot the torque-speed curve
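For reference, the expressions evaluated in the code above, written out (these simply restate the v_th, z_th and t_ind lines; nothing new is assumed):

$$V_{TH} = V_{\phi}\,\frac{X_M}{\sqrt{R_1^2 + (X_1 + X_M)^2}}, \qquad Z_{TH} = R_{TH} + jX_{TH} = \frac{jX_M\,(R_1 + jX_1)}{R_1 + j(X_1 + X_M)}$$

$$\tau_{ind} = \frac{3\,V_{TH}^2\,(R_2/s)}{\omega_{sync}\left[(R_{TH} + R_2/s)^2 + (X_{TH} + X_2)^2\right]}$$

The doubled-resistance curve is the same expression with $R_2$ replaced by $2R_2$.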
|
7,512
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import IFrame
IFrame('https://plot.ly/~empet/13475/', width=800, height=350)
IFrame('https://plot.ly/~empet/13503/', width=600, height=475)
IFrame('https://plot.ly/~empet/13497/', width=550, height=550)
IFrame('https://plot.ly/~empet/13479/', width=825, height=950)
IFrame('https://plot.ly/~empet/13481/', width=900, height=950)
import numpy as np
import plotly.plotly as py
import plotly.graph_objs as go
from plotly import tools as tls
pts = np.loadtxt('Data/data-file.txt')
x, y, z = zip(*pts)
points = go.Scatter3d(mode='markers',
name='',
x =x,
y= y,
z= z,
marker=dict(size=2, color='#458B00'))
simplexes = go.Mesh3d(alphahull =10.0,
name = '',
x =x,
y= y,
z= z,
color='#90EE90',
opacity=0.15)
axis = dict(showbackground=True,
backgroundcolor="rgb(245, 245, 245)",
gridcolor="rgb(255, 255, 255)",
gridwidth=2,
zerolinecolor="rgb(255, 255, 255)",
tickfont=dict(size=11),
titlefont =dict(size=12))
x_style = dict(axis, range=[-2.85, 4.25], tickvals=np.linspace(-2.85, 4.25, 5)[1:].round(1))
y_style = dict(axis, range=[-2.65, 1.32], tickvals=np.linspace(-2.65, 1.32, 4)[1:].round(1))
z_style = dict(axis, range=[-3.67,1.4], tickvals=np.linspace(-3.67, 1.4, 5).round(1))
layout = go.Layout(title='Alpha shape of a set of 3D points. Alpha=0.1',
width=500,
height=500,
scene=dict(xaxis=x_style,
yaxis=y_style,
zaxis=z_style))
fig = go.FigureWidget(data=[points, simplexes], layout=layout)
#fig
fig = go.FigureWidget(data=[points, simplexes], layout=layout)
#py.plot(fig, filename='3D-AlphaS-ex')
IFrame('https://plot.ly/~empet/13499/', width=550, height=550)
from scipy.spatial import Delaunay
def sq_norm(v): #squared norm
return np.linalg.norm(v)**2
def circumcircle(points,simplex):
A = [points[simplex[k]] for k in range(3)]
M = [[1.0]*4]
M += [[sq_norm(A[k]), A[k][0], A[k][1], 1.0 ] for k in range(3)]
M = np.asarray(M, dtype=np.float32)
S = np.array([0.5*np.linalg.det(M[1:, [0,2,3]]), -0.5*np.linalg.det(M[1:, [0,1,3]])])
a = np.linalg.det(M[1:, 1:])
b = np.linalg.det(M[1:, [0,1,2]])
return S/a, np.sqrt(b/a + sq_norm(S)/a**2) #center=S/a, radius=np.sqrt(b/a+sq_norm(S)/a**2)
def get_alpha_complex(alpha, points, simplexes):
#alpha is the parameter for the alpha shape
#points are given data points
#simplexes is the list of indices in the array of points
#that define 2-simplexes in the Delaunay triangulation
return filter(lambda simplex: circumcircle(points,simplex)[1] < alpha, simplexes)
pts = np.loadtxt('Data/data-ex-2d.txt')
tri = Delaunay(pts)
colors = ['#C0223B', '#404ca0', 'rgba(173,216,230, 0.5)']# colors for vertices, edges and 2-simplexes
def Plotly_data(points, complex_s):
#points are the given data points,
#complex_s is the list of indices in the array of points defining 2-simplexes(triangles)
#in the simplicial complex to be plotted
X = []
Y = []
for s in complex_s:
X += [points[s[k]][0] for k in [0,1,2,0]] + [None]
Y += [points[s[k]][1] for k in [0,1,2,0]] + [None]
return X, Y
def make_trace(x, y, point_color=colors[0], line_color=colors[1]):# define the trace
#for an alpha complex
return go.Scatter(mode='markers+lines', #vertices and
#edges of the alpha-complex
name='',
x=x,
y=y,
marker=dict(size=6.5, color=point_color),
line=dict(width=1.25, color=line_color))
figure = tls.make_subplots(rows=1, cols=2,
subplot_titles=('Delaunay triangulation', 'Alpha shape, alpha=0.15'),
horizontal_spacing=0.1,
)
title = 'Delaunay triangulation and Alpha Complex/Shape for a Set of 2D Points'
figure.layout.update(title=title,
font=dict(family="Open Sans, sans-serif"),
showlegend=False,
hovermode='closest',
autosize=False,
width=800,
height=460,
margin=dict(l=65,
r=65,
b=85,
t=120));
axis_style = dict(showline=True,
mirror=True,
zeroline=False,
showgrid=False,
showticklabels=True,
range=[-0.1,1.1],
tickvals=[0, 0.2, 0.4, 0.6, 0.8, 1.0],
ticklen=5
)
for s in range(1,3):
figure.layout.update({'xaxis{}'.format(s): axis_style})
figure.layout.update({'yaxis{}'.format(s): axis_style})
alpha_complex = list(get_alpha_complex(0.15, pts, tri.simplices))
X, Y = Plotly_data(pts, tri.simplices)# get data for Delaunay triangulation
figure.append_trace(make_trace(X, Y), 1, 1)
X, Y = Plotly_data(pts, alpha_complex)# data for alpha complex
figure.append_trace(make_trace(X, Y), 1, 2)
shapes = []
for s in alpha_complex: #fill in the triangles of the alpha complex
A = pts[s[0]]
B = pts[s[1]]
C = pts[s[2]]
shapes.append(dict(path=f'M {A[0]}, {A[1]} L {B[0]}, {B[1]} L {C[0]}, {C[1]} Z',
fillcolor='rgba(173, 216, 230, 0.5)',
line=dict(color=colors[1], width=1.25),
xref='x2',
yref='y2'
))
figure.layout.shapes=shapes
py.plot(figure, filename='2D-AlphaS-ex', width=850)
IFrame('https://plot.ly/~empet/13501', width=800, height=460)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If $T$ is the set of points defining a $k$-simplex, then any proper subset of $T$ defines an $\ell$-simplex, $\ell<k$.
Step2: Triangular meshes used in computer graphics are examples of simplicial complexes.
Step3: Alpha shape of a finite set of points
Step4: We notice that the Delaunay triangulation has as boundary a convex set (it is a triangulation of the convex hull).
Step5: We notice in the subplots above that as alphahull increases, i.e. $\alpha$ decreases, some parts of the alpha shape shrink and
Step6: Load data
Step7: Define two traces
Step8: Generating the alpha shape of a set of 2D points
Step9: Compute the circumcenter and circumradius of a triangle (see their definitions here)
Step10: Filter out the Delaunay triangulation to get the $\alpha$-complex (the selection criterion is restated after this step list)
Step11: Get data for the Plotly plot of a subcomplex of the Delaunay triangulation
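The rule implemented by get_alpha_complex above, stated as a formula: with $r(\sigma)$ the circumradius returned by circumcircle, a 2-simplex $\sigma$ of the Delaunay triangulation $\mathrm{Del}(P)$ is kept exactly when $r(\sigma) < \alpha$, i.e. $\mathcal{C}_{\alpha} = \{\sigma \in \mathrm{Del}(P) : r(\sigma) < \alpha\}$; the $\alpha$-shape plotted in the right-hand subplot is the union of these triangles.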
|
7,513
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import mne
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif'
tmin, tmax = 0, 20 # use the first 20s of data
# Setup for reading the raw data (save memory by cropping the raw data
# before loading it)
raw = mne.io.read_raw_fif(raw_fname)
raw.crop(tmin, tmax).load_data()
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # bads + 2 more
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Pick a subset of channels (here for speed reason)
selection = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
raw.notch_filter(np.arange(60, 241, 60), picks=picks, filter_length='auto',
phase='zero')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# low pass filtering below 50 Hz
raw.filter(None, 50., h_trans_bandwidth='auto', filter_length='auto',
phase='zero')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
raw.filter(1., None, l_trans_bandwidth='auto', filter_length='auto',
phase='zero')
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks, average=False)
# band-pass filtering in the range 1 Hz - 50 Hz
raw.filter(1, 50., l_trans_bandwidth='auto', h_trans_bandwidth='auto',
filter_length='auto', phase='zero')
raw.resample(100, npad="auto") # set sampling frequency to 100Hz
raw.plot_psd(area_mode='range', tmax=10.0, picks=picks)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Removing power-line noise with notch filtering
Step2: Removing power-line noise with low-pass filtering
Step3: High-pass filtering to remove slow drifts
Step4: To do the low-pass and high-pass filtering in one step you can do a band-pass filter
Step5: Downsampling and decimation
|
7,514
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
from __future__ import print_function
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from math import sin, pi, tan
def lbf2N(w):
return w*4.448
def deg2rad(d):
return d*pi/180
def in2mm(i):
return i*25.4
def Nm2lbfin(nm):
return nm*8.851
# unknown table of values for motors
motor_t = [.22, 12.1, 19.8, 28.1, 38, 49.6, 60.1, 69.1, 81.3, 91.4, 100.7] # lbf-in
motor_c = [6.2, 10.2, 12.9, 15.6, 19.5, 23.6, 27.9, 31.3, 36.8, 41.4, 46.1] # Amps
plt.plot(motor_c, motor_t)
plt.grid(True)
plt.xlabel('Current [A]')
plt.ylabel('Torque [lbf-in]')
plt.title('NPC-2212 Motor Performance')
def force_total(inc):
    '''Calculate the total force the wheels must overcome. This
    is a function of both gravity pulling the robot down the ramp
    and the frictional force between the wheels and the floor.
    Returns the force ONE motor must overcome to not slide down the ramp.'''
w_N = lbf2N(90) # weight in N
# http://www.engineeringtoolbox.com/rolling-friction-resistance-d_1303.html
# 0.03 car tires on cobbles - large worn
frict_coeff = 0.03 # coefficient of friction, guess based on searching internet
force_fric = w_N*frict_coeff*2 # 2 leg wheels on the floor
force_incline = w_N*sin(deg2rad(inc)) # gravity pulling the robot down the ramp
return (force_fric + force_incline)/2 # divide by 2 because 2 leg motors
def torque(f):
r_wheel = in2mm(2.5) # wheel radius in mm
return f*r_wheel/1000
f = force_total(0)
t = torque(f)
print('force', f, 'N')
print('torque', t, ' Nm or ', Nm2lbfin(t), 'lbf-in')
f = force_total(7.2)
t = torque(f)
print('force', f, 'N')
print('torque', t, ' Nm or ', Nm2lbfin(t), 'lbf-in')
def wrapper(angles):
    '''This is just a wrapper on the above functions so it is easier to find
    the needed torque.
    Arguments:
        angles: an array of angles in degrees
    Returns an array of torques'''
# w_N = lbf2N(70) # robot weight in N
# frict_coeff = 0.15
# r_wheel = in2mm(2.5) # wheel radius in mm
t = [] # output array of torques
for a in angles:
ff = force_total(a)
tt = torque(ff)
t.append(Nm2lbfin(tt))
return t
# plot some stuff
plt.subplot(1, 2, 1)
plt.plot(motor_c, motor_t)
plt.grid(True)
plt.xlabel('Current [A]')
plt.ylabel('Torque [lbf-in]')
plt.title('NPC-2212 Motor Performance')
plt.subplot(1,2,2)
x = range(0, 21)
y = wrapper(x)
plt.plot(x, y)
plt.grid(True)
plt.xlabel('Incline [degree]')
plt.ylabel('Torque [lbf-in]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: R2 isn't expected to do a lot of uphill climbing. For reference, the power wheelchair ramp slope of 7.2 degrees is used to bound the kind of slope R2 could encounter in the school (the force and torque expressions used below are restated after this step list).
Step3: Flat Floor
Step4: Wheelchair Ramp
Step6: Range of Incline Angles
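The quantities computed by force_total and torque above, written out (W is the 90 lbf robot weight converted to newtons, $\mu = 0.03$ the guessed rolling-friction coefficient, $\theta$ the incline angle, and $r$ the 2.5 in wheel radius converted to meters — all values taken from the code, none new):

$$F_{motor} = \frac{2\,\mu W + W\sin\theta}{2}, \qquad \tau = F_{motor}\,r$$

The factor of 2 in the numerator accounts for the two leg wheels on the floor, and the division by 2 splits the load between the two leg motors, matching the comments in force_total.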
|
7,515
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import bigquery
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
(
SELECT
RAND() AS splitfield,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
)
WHERE
  splitfield < 0.8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results['alpha'][0]
print(alpha)
compute_rmse = """
#standardSQL
SELECT
dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' )
GROUP BY
  dataset
"""
bigquery.Client().query(
compute_rmse.replace('ALPHA', str(alpha))).to_dataframe()
train_and_eval_rand = """
#standardSQL
WITH
alldata AS (
SELECT
IF (RAND() < 0.8, 'train', 'eval') AS dataset,
arrival_delay,
departure_delay
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX' ),
training AS (
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
alldata
WHERE
dataset = 'train' )
SELECT
MAX(alpha) AS alpha,
dataset,
SQRT(
AVG(
(arrival_delay - alpha * departure_delay) *
(arrival_delay - alpha * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
alldata,
training
GROUP BY
  dataset
"""
bigquery.Client().query(train_and_eval_rand).to_dataframe()
compute_alpha = """
#standardSQL
SELECT
SAFE_DIVIDE(
SUM(arrival_delay * departure_delay),
SUM(departure_delay * departure_delay)) AS alpha
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
  AND ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8
"""
results = bigquery.Client().query(compute_alpha).to_dataframe()
alpha = results['alpha'][0]
print(alpha)
compute_rmse = """
#standardSQL
SELECT
IF(ABS(MOD(FARM_FINGERPRINT(date), 10)) < 8, 'train', 'eval') AS dataset,
SQRT(
AVG(
(arrival_delay - ALPHA * departure_delay) *
(arrival_delay - ALPHA * departure_delay)
)
) AS rmse,
COUNT(arrival_delay) AS num_flights
FROM
`bigquery-samples.airline_ontime_data.flights`
WHERE
departure_airport = 'DEN'
AND arrival_airport = 'LAX'
GROUP BY
  dataset
"""
print(bigquery.Client().query(
compute_rmse.replace('ALPHA', str(alpha))).to_dataframe().head())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: <h3> Create a simple machine learning model </h3>
Step4: <h3> What is wrong with calculating RMSE on the training and test data as follows? </h3>
Step6: Hint
Step8: <h2> Using HASH of date to split the data </h2>
Step10: We can now use the alpha to compute RMSE. Because the alpha value is repeatable, we don't need to worry that the alpha in the compute_rmse will be different from the alpha computed in the compute_alpha.
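A hedged illustration of why the FARM_FINGERPRINT-based split above is repeatable while the RAND() split is not: the fingerprint of the date column is deterministic, so a given row always lands in the same bucket no matter how many times or in which query it is evaluated. The Python below only sketches the idea; the md5-based helper is a stand-in for illustration, not the actual BigQuery hash function.

import hashlib

def date_bucket(date_str, n_buckets=10):
    # Deterministic: the same date string always maps to the same bucket,
    # so a row's train/eval membership never changes between runs.
    return int(hashlib.md5(date_str.encode()).hexdigest(), 16) % n_buckets

dataset = 'train' if date_bucket('2008-05-17') < 8 else 'eval'  # roughly an 80/20 split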
|
7,516
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
%pylab inline
pylab.style.use('ggplot')
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/balance-scale/balance-scale.data'
balance_df = pd.read_csv(url, header=None)
balance_df.columns = ['class_name', 'left_weight', 'left_distance', 'right_weight', 'right_distance']
balance_df.head()
counts = balance_df['class_name'].value_counts()
counts.plot(kind='bar')
from sklearn.feature_selection import f_classif
features = balance_df.drop('class_name', axis=1)
names = balance_df['class_name']
# check for negative feature values
features[features < 0].sum(axis=0)
t_stats, p_vals = f_classif(features, names)
feature_importances = pd.DataFrame(np.column_stack([t_stats, p_vals]),
index=features.columns.copy(),
columns=['t_stats', 'p_vals'])
feature_importances.plot(subplots=True, kind='bar')
plt.xticks(rotation=30)
import seaborn as sns
for colname in balance_df.columns.drop('class_name'):
fg = sns.FacetGrid(col='class_name', data=balance_df)
fg = fg.map(pylab.hist, colname)
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.naive_bayes import GaussianNB
estimator = GaussianNB()
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=12345)
f1 = cross_val_score(estimator, features, names, cv=cv, scoring='f1_micro')
pd.Series(f1).plot(title='F1 Score (Micro)', kind='bar')
estimator = GaussianNB()
cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=12345)
f1 = cross_val_score(estimator, features, names, cv=cv, scoring='accuracy')
pd.Series(f1).plot(title='Accuracy', kind='bar')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check for Class Imbalance
Step2: Feature Importances
|
7,517
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import interp1d
x = np.linspace(0, 10, num=11, endpoint=True)
y = np.cos(-x**2/9.0)
f = interp1d(x, y, kind='linear') # default if kind=None
f2 = interp1d(x, y, kind='cubic')
f3 = interp1d(x, y, kind='nearest')
xnew = np.linspace(0, 10, num=41, endpoint=True)
plt.plot(x, y, 'o', xnew, f(xnew), '-', xnew, f2(xnew), '--', xnew, f3(xnew), '.-')
plt.legend(['data', 'linear', 'cubic', 'nearest'], loc='best')
plt.show()
from scipy.interpolate import griddata
def func(x, y):
return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
# A regular grid array of x,y coordinates
grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j] # see np.info(np.mgrid) for an explanation of the 200j !!
np.info(np.mgrid)
# A random sampling within the same area
points = np.random.rand(1000, 2)
values = func(points[:,0], points[:,1])
# Resample from the values at these points onto the regular mesh
grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
plt.subplot(221)
plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower', cmap='jet')
plt.plot(points[:,0], points[:,1], 'k.', ms=1)
plt.title('Original')
plt.subplot(222)
plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower', cmap='jet')
plt.title('Nearest')
plt.subplot(223)
plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower', cmap='jet')
plt.title('Linear')
plt.subplot(224)
plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower', cmap='jet')
plt.title('Cubic')
plt.gcf().set_size_inches(6, 6)
plt.show()
from scipy.interpolate import splrep, splev
x = np.arange(0, 2*np.pi+np.pi/4, 2*np.pi/8)
y = np.sin(x)
tck = splrep(x, y, s=0)
xnew = np.arange(0, 2*np.pi, np.pi/50)
ynew = splev(xnew, tck, der=0)
yder = splev(xnew, tck, der=1)
plt.figure()
plt.plot(x, y, 'x', xnew, ynew, xnew, np.sin(xnew), x, y, 'b')
plt.legend(['Linear', 'Cubic Spline', 'True'])
plt.axis([-0.05, 6.33, -1.05, 1.05])
plt.title('Cubic-spline interpolation')
plt.show()
plt.figure()
plt.plot(xnew, yder, xnew, np.cos(xnew),'--')
plt.legend(['Cubic Spline', 'True'])
plt.axis([-0.05, 6.33, -1.05, 1.05])
plt.title('Derivative estimation from spline')
plt.show()
from scipy.interpolate import bisplrep, bisplev
# Gridded function (at low resolution ... doesn't need to be gridded data here)
x, y = np.mgrid[-1:1:20j, -1:1:20j]
z = (x+y) * np.exp(-6.0*(x*x+y*y))
plt.figure()
plt.pcolor(x, y, z, cmap='jet')
plt.colorbar()
plt.title("Sparsely sampled function.")
plt.show()
xnew, ynew = np.mgrid[-1:1:70j, -1:1:70j]
## Create the spline-representation object tck
tck = bisplrep(x, y, z, s=0)
znew = bisplev(xnew[:,0], ynew[0,:], tck)
plt.figure()
plt.pcolor(xnew, ynew, znew, cmap='jet')
plt.colorbar()
plt.title("Interpolated function.")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1D data
Step2: nD data
Step3: Splines
Step4: 2D splines are also available
|
7,518
|
<ASSISTANT_TASK:>
Python Code:
Sudoku = [ ["*", 3 , 9 , "*", "*", "*", "*", "*", 7 ],
["*", "*", "*", 7 , "*", "*", 4 , 9 , 2 ],
["*", "*", "*", "*", 6 , 5 , "*", 8 , 3 ],
["*", "*", "*", 6 , "*", 3 , 2 , 7 , "*"],
["*", "*", "*", "*", 4 , "*", 8 , "*", "*"],
[ 5 , 6 , "*", "*", "*", "*", "*", "*", "*"],
["*", "*", 5 , 2 , "*", 9 , "*", "*", 1 ],
["*", 2 , 1 , "*", "*", "*", "*", 4 , "*"],
[ 7 , "*", "*", "*", "*", "*", 5 , "*", "*"]
]
def all_different(S):
return { f'{x} != {y}' for x in S for y in S if x < y }
all_different({'a', 'b', 'c'})
def specific_constraints(Puzzle):
Constraints = set()
row = 0
for Row in Puzzle:
row += 1
col = 0
for number in Row:
col += 1
if number != '*':
Constraints.add(f'V{row}{col} == {number}')
return Constraints
specific_constraints(Sudoku)
def sudoku_csp(Puzzle):
R = range(1, 9+1)
S = range(2+1)
Variables = { f'V{c}{r}' for c in R for r in R }
Values = set(R)
Constraints = specific_constraints(Puzzle)
for row in R:
Constraints |= all_different({f'V{row}{col}' for col in R})
for col in R:
Constraints |= all_different({f'V{row}{col}' for row in R})
for rb in S:
for cb in S:
Constraints |= all_different({f'V{3*rb+r+1}{3*cb+c+1}' for r in S for c in S})
return Variables, Values, Constraints
sudoku_csp(Sudoku)
def find_alternative(csp, Solution):
Variables, Values, Constraints = csp
formula = ' or '.join({ f'{var} != {Solution[var]}' for var in Variables })
return Variables, Values, Constraints | { formula }
import ipycanvas as cnv
size = 100
def show_solution(Solution):
canvas = cnv.Canvas(size=(size * 9, size * 9))
canvas.font = '20px sans-serif'
canvas.text_align = 'center'
canvas.text_baseline = 'middle'
for row in range(9):
for col in range(9):
x = col * size
y = row * size
canvas.line_width = 1.0
canvas.stroke_rect(x, y, size, size)
entry = Sudoku[row][col]
if entry == '*':
key = f'V{row+1}{col+1}'
symbol = str(Solution[key])
canvas.fill_style = 'blue'
else:
symbol = str(entry)
canvas.fill_style = 'black'
x += size // 2
y += size // 2
canvas.fill_text(symbol, x, y)
canvas.line_width = 3.0
for row in range(3):
for col in range(3):
x = 3 * col * size
y = 3 * row * size
canvas.stroke_rect(x, y, 3 * size, 3 * size)
canvas.stroke_style = 'black'
canvas.line_width = 6.0
canvas.stroke_rect(0, 0, 9 * size, 9 * size)
display(canvas)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function sudoku_csp(Puzzle) takes a given sudoku Puzzle as its argument and returns a CSP that encodes the given sudoku. The variables should have names like $\texttt{V}ij$ where $i,j \in {1,\cdots,9}$. For example, V21 would be the variable describing the first cell in the second row.
Step2: Given a sudoku Puzzle and a Solution for this puzzle, the function find_alternative computes a CSP
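For illustration, the extra constraint built by find_alternative is a single disjunction stating that at least one variable must differ from the given solution. A minimal sketch with a toy two-variable "solution" (hypothetical values, not a real sudoku solution):
toy_solution = {'V11': 3, 'V12': 9}
formula = ' or '.join({f'{var} != {toy_solution[var]}' for var in toy_solution})
print(formula)  # e.g. "V11 != 3 or V12 != 9" (set iteration order may vary)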
Step3: Code to Display the Solution
Step4: The function show_solution prints the solution.
|
7,519
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import TensorFlow
!pip install tf-nightly
import tensorflow as tf
# Helper libraries
import numpy as np
import os
print(tf.__version__)
global_batch_size = 16
# Create a tf.data.Dataset object.
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(global_batch_size)
@tf.function
def train_step(inputs):
features, labels = inputs
return labels - 0.3 * features
# Iterate over the dataset using the for..in construct.
for inputs in dataset:
print(train_step(inputs))
global_batch_size = 16
mirrored_strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensors(([1.], [1.])).repeat(100).batch(global_batch_size)
# Distribute input using the `experimental_distribute_dataset`.
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
# 1 global batch of data fed to the model in 1 step.
print(next(iter(dist_dataset)))
dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(64).batch(16)
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
dataset = dataset.with_options(options)
mirrored_strategy = tf.distribute.MirroredStrategy()
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(64).batch(16)
dataset = dataset.shard(
input_context.num_input_pipelines, input_context.input_pipeline_id)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
return dataset
dist_dataset = mirrored_strategy.experimental_distribute_datasets_from_function(dataset_fn)
global_batch_size = 16
mirrored_strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(100).batch(global_batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
@tf.function
def train_step(inputs):
features, labels = inputs
return labels - 0.3 * features
for x in dist_dataset:
# train_step trains the model using the dataset elements
loss = mirrored_strategy.run(train_step, args=(x,))
print("Loss is ", loss)
num_epochs = 10
steps_per_epoch = 5
for epoch in range(num_epochs):
dist_iterator = iter(dist_dataset)
for step in range(steps_per_epoch):
# train_step trains the model using the dataset elements
loss = mirrored_strategy.run(train_step, args=(next(dist_iterator),))
# which is the same as
# loss = mirrored_strategy.run(train_step, args=(dist_iterator.get_next(),))
print("Loss is ", loss)
# You can break the loop with get_next_as_optional by checking if the Optional contains value
global_batch_size = 4
steps_per_loop = 5
strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "CPU:0"])
dataset = tf.data.Dataset.range(9).batch(global_batch_size)
distributed_iterator = iter(strategy.experimental_distribute_dataset(dataset))
@tf.function
def train_fn(distributed_iterator):
for _ in tf.range(steps_per_loop):
optional_data = distributed_iterator.get_next_as_optional()
if not optional_data.has_value():
break
per_replica_results = strategy.run(lambda x:x, args=(optional_data.get_value(),))
tf.print(strategy.experimental_local_results(per_replica_results))
train_fn(distributed_iterator)
global_batch_size = 16
epochs = 5
steps_per_epoch = 5
mirrored_strategy = tf.distribute.MirroredStrategy()
dataset = tf.data.Dataset.from_tensors(([1.],[1.])).repeat(100).batch(global_batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
@tf.function(input_signature=[dist_dataset.element_spec])
def train_step(per_replica_inputs):
def step_fn(inputs):
return 2 * inputs
return mirrored_strategy.run(step_fn, args=(per_replica_inputs,))
for _ in range(epochs):
iterator = iter(dist_dataset)
for _ in range(steps_per_epoch):
output = train_step(next(iterator))
tf.print(output)
mirrored_strategy = tf.distribute.MirroredStrategy()
dataset_size = 24
batch_size = 6
dataset = tf.data.Dataset.range(dataset_size).enumerate().batch(batch_size)
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
def predict(index, inputs):
outputs = 2 * inputs
return index, outputs
result = {}
for index, inputs in dist_dataset:
output_index, outputs = mirrored_strategy.run(predict, args=(index, inputs))
indices = list(mirrored_strategy.experimental_local_results(output_index))
rindices = []
for a in indices:
rindices.extend(a.numpy())
outputs = list(mirrored_strategy.experimental_local_results(outputs))
routputs = []
for a in outputs:
routputs.extend(a.numpy())
for i, value in zip(rindices, routputs):
result[i] = value
print(result)
mirrored_strategy = tf.distribute.MirroredStrategy()
worker_devices = mirrored_strategy.extended.worker_devices
def value_fn(ctx):
return tf.constant(1.0)
distributed_values = mirrored_strategy.experimental_distribute_values_from_function(value_fn)
for _ in range(4):
result = mirrored_strategy.run(lambda x:x, args=(distributed_values,))
print(result)
mirrored_strategy = tf.distribute.MirroredStrategy()
def input_gen():
while True:
yield np.random.rand(4)
# use Dataset.from_generator
dataset = tf.data.Dataset.from_generator(
input_gen, output_types=(tf.float32), output_shapes=tf.TensorShape([4]))
dist_dataset = mirrored_strategy.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
for _ in range(4):
mirrored_strategy.run(lambda x:x, args=(next(iterator),))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Distributed input
Step2: Two APIs were introduced that distribute a tf.data.Dataset instance and return a distributed dataset object, so that users can adopt a tf.distribute strategy with minimal changes to their existing code. Users can then iterate over this distributed dataset instance and train the model as before. Let us now look at the two APIs, tf.distribute.Strategy.experimental_distribute_dataset and tf.distribute.Strategy.experimental_distribute_datasets_from_function, in more detail.
Step3: Properties
Step4: Three different options can be set for tf.data.experimental.AutoShardPolicy.
Step5: Properties
Step6: Creating an explicit iterator with iter
Step7: When next() or tf.distribute.DistributedIterator.get_next() reaches the end of the tf.distribute.DistributedIterator, an OutOfRange error is raised. The client can catch the error on the Python side and continue other work such as checkpointing and evaluation. However, when using a host training loop (e.g.
Step8: Using the element_spec attribute
Step9: Partial batches
Step10: <a name="tensorinputs"> # How do you distribute data when you are not using a standard tf.data.Dataset instance? </a>
Step11: Using tf.data.Dataset.from_generator for generator inputs
|
7,520
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import os
from pandas import DataFrame
from pandas import read_csv
from numpy import mean
from numpy import std
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.style.use('ggplot')
import seaborn as sns
results = read_csv('parkinsons_updrs.csv')
results.head()
data = [results['motor_UPDRS'].describe(),results['total_UPDRS'].describe()]
df = pd.DataFrame(data)
df.round(2)
other_Stats = {'Median': [results['motor_UPDRS'].median(), results['total_UPDRS'].median()],
'Skew': [results['motor_UPDRS'].skew(), results['total_UPDRS'].skew()],
'Kurtosis': [results['motor_UPDRS'].kurt(), results['total_UPDRS'].kurt()]}
df1 = pd.DataFrame(other_Stats, index=['motor_UPDRS', 'total_UPDRS'])
df1.round(2)
plt.subplot(1, 2, 1)
plt.hist(results["motor_UPDRS"],color = "skyblue")
plt.xlabel('Motor_UPDRS Index')
plt.ylabel('Frequency')
plt.subplot(1, 2, 2)
plt.hist(results["total_UPDRS"],color = "green")
plt.xlabel('Total_UPDRS Index')
plt.show()
data1 = [results['motor_UPDRS'],results['total_UPDRS']]
fig, ax = plt.subplots(figsize=(5, 5))
plt.boxplot(data1)
ax.set_xlabel('motor_UPDRS, total_UPDRS')
ax.set_ylabel('Response')
plt.show()
ax=sns.factorplot(x="age", y="motor_UPDRS", col="sex", data = results, kind="box", size=3, aspect=2)
ax=sns.factorplot(x="age", y="total_UPDRS", col="sex", data = results, kind="box", size=3, aspect=2)
#groupby_subject= results.groupby('subject#')
sns.factorplot(x= 'subject#', y= 'motor_UPDRS', hue='age', col='sex', data=results, kind="swarm", size=3, aspect=3);
sns.factorplot(x= 'subject#', y= 'total_UPDRS', hue='age', col='sex', data=results, kind="swarm", size=3, aspect=3);
tab_1 = pd.crosstab(index=results["subject#"], columns="count")
print(tab_1)
tab_2 = pd.crosstab(index=results["age"], columns="count")
plt.hist(results['age'], color="violet")
plt.ylabel('Qty of observations');
plt.xlabel('Age')
plt.show()
print(tab_2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Statistics results suggest
Step2: Dotplots with grouping by Subject, Age and Sex
|
7,521
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
u=np.random.random()
print u
v=np.random.random(5)
print v
A=np.random.random((2,3))
print A
x=np.random.random(2000)
histo=plt.hist(x, bins=15, normed=True, color='g')
plt.plot([0,1], [1,1], 'r')# graficul densitatii uniforme pe [0,1)
def fexpo(theta, x):
return np.exp(-x/theta)/theta
theta=2.7
N=2000# nr de valori generate
x=np.random.exponential(theta, N)
histo=plt.hist(x, bins=20, normed=True, color='g', alpha=0.6)
xmin=np.min(x)
xmax=np.max(x)
M=100
X=np.linspace(xmin, xmax, M)
Y=fexpo(theta, X)
plt.plot(X,Y, 'r')#traseaza graficul densitatii exponentiale
val = np.loadtxt("Vizite.txt")
histog=plt.hist(val,bins=100, normed=True, color='r')
from IPython.core.display import HTML
def css_styling():
styles = open("./custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The seed is set during the debugging phase of the code, because with it set, every run generates the same sequence of numbers.
Step2: The function np.random.exponential(theta, size=None) returns observed values of an exponentially distributed random variable with parameter $\theta$=theta.
Step3: We generate N observed values from the $Exp(\theta)$ distribution and display the histogram of the values
Step4: The function np.random.exponential implements the algorithm for simulating the Exp distribution via the inversion method
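As a sketch of the inversion method mentioned here: if U is uniform on [0, 1), then -theta*ln(1 - U) follows the Exp(theta) distribution, which is essentially what np.random.exponential does internally (illustration only, not the library source):
import numpy as np

theta = 2.7
U = np.random.random(2000)          # uniform samples on [0, 1)
x_inv = -theta * np.log(1.0 - U)    # inverse-CDF transform -> Exp(theta) samples
print(x_inv.mean())                 # should be close to theta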
Step5: The file Vizite.txt contains the number of visits to the sites monitored by SATI in March.
|
7,522
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
import matplotlib.pylab as plt
import padasip as pa
%matplotlib inline
plt.style.use('ggplot') # nicer plots
np.random.seed(52102) # always use the same random seed to make results comparable
%config InlineBackend.print_figure_kwargs = {}
u = np.arange(0,10,1)
u
x = pa.input_from_history(u, 4)
x
n = 4
x = pa.input_from_history(u, n, bias=True)
x
N = len(u)
n = 4
N - n + 1
len(x)
# signals creation: u, v, d
N = 5000
n = 10
u = np.sin(np.arange(0, N/10., N/50000.))
v = np.random.normal(0, 1, N)
d = u + v
# filtering
x = pa.input_from_history(d, n)[:-1]
d = d[n:]
u = u[n:]
f = pa.filters.FilterRLS(mu=0.9, n=n)
y, e, w = f.run(d, x)
# error estimation
MSE_d = np.dot(u-d, u-d) / float(len(u))
MSE_y = np.dot(u-y, u-y) / float(len(u))
# results
plt.figure(figsize=(12.5,6))
plt.plot(u, "r:", linewidth=4, label="original")
plt.plot(d, "b", label="noisy, MSE: {}".format(MSE_d))
plt.plot(y, "g", label="filtered, MSE: {}".format(MSE_y))
plt.xlim(N-100,N)
plt.legend()
plt.tight_layout()
plt.show()
# creation of x and d
N = 700
x = np.random.random((N, 4))
v = np.random.normal(0, 1, N) * 0.1
d = 2*x[:,0] + 0.1*x[:,1] - 4*x[:,2] + 0.5*x[:,3] + v
# identification
f = pa.filters.FilterNLMS(mu=0.5, n=4)
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(12.5,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("Number of iteration [-]")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output")
plt.xlim(0, N)
plt.legend()
plt.subplot(212); plt.title("Filter error"); plt.xlabel("Number of iteration [-]")
plt.plot(pa.misc.logSE(e),"r", label="Squared error [dB]");plt.legend()
plt.xlim(0, N)
plt.tight_layout()
plt.show()
print("And the resulting coefficients are: {}".format(w[-1]))
# creation of u, x and d
N = 100
u = np.random.random(N)
d = np.zeros(N)
for k in range(3, N):
d[k] = 2*u[k] + 0.1*u[k-1] - 4*u[k-2] + 0.5*u[k-3]
d = d[3:]
# identification
x = pa.input_from_history(u, 4)
f = pa.filters.FilterRLS(mu=0.1, n=4)
y, e, w = f.run(d, x)
# show results
plt.figure(figsize=(12.5,9))
plt.subplot(211);plt.title("Adaptation");plt.xlabel("Number of iteration [-]")
plt.plot(d,"b", label="d - target")
plt.plot(y,"g", label="y - output");plt.legend()
plt.subplot(212);plt.title("Filter error");plt.xlabel("Number of iteration [-]")
plt.plot(pa.misc.logSE(e),"r", label="Squared error [db]");plt.legend()
plt.tight_layout()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Construction of Input Vectors (Input Matrix) from a Time Series
Step2: If the series is only an input of the adaptive filter, the input matrix can be a so called Delay Embedded Matrix (DEM). The construction of the input matrix can be simply created as
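As a quick cross-check (a sketch, not part of padasip), the same delay-embedded matrix can be built by hand with plain numpy slicing:
import numpy as np

u = np.arange(0, 10, 1)
n = 4
# each row holds n consecutive samples of the series
x_manual = np.array([u[k:k + n] for k in range(len(u) - n + 1)])
print(x_manual.shape)  # (7, 4) -- matches pa.input_from_history(u, 4) up to the per-row ordering convention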
Step3: In this matrix, every row represents an input vector for one sample. If you want to extend the input matrix with a bias, then
Step4: The length of the input matrix (number of rows) is
Step5: is equal to
Step6: Noise Cancelation (Example with RLS Filter)
Step7: According to this result, the adaptive filter can decrease the MSE caused by noise.
Step8: Prediction (Example with RLS Filter)
|
7,523
|
<ASSISTANT_TASK:>
Python Code:
# Assumed imports for this excerpt (the usual nustar_pysolar planning setup):
from nustar_pysolar import planning, io
import astropy.units as u
from astropy.coordinates import SkyCoord
fname = io.download_occultation_times(outdir='../data/')
print(fname)
tlefile = io.download_tle(outdir='../data')
print(tlefile)
times, line1, line2 = io.read_tle_file(tlefile)
tstart = '2021-01-08T10:00:00'
tend = '2021-01-08T17:00:00'
orbits = planning.sunlight_periods(fname, tstart, tend)
orbits
# Get the solar parameter
from sunpy.coordinates import sun
angular_size = sun.angular_radius(t='now')
dx = angular_size.arcsec
print(dx)
pa = planning.get_nustar_roll(tstart, 0)
print(tstart)
print("NuSTAR Roll angle for Det0 in NE quadrant: {}".format(pa))
# Orbit 1 (West limb)
offset = [700., -300.]*u.arcsec
ind = 1
orbit = orbits[0]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
# Orbit 2 (East Limb)
offset = [-800., 150.]*u.arcsec
for oi in [1, 2, 3]:
ind = oi+1
orbit = orbits[oi]
midTime = (0.5*(orbit[1] - orbit[0]) + orbit[0])
sky_pos = planning.get_skyfield_position(midTime, offset, load_path='./data', parallax_correction=True)
print("Orbit: {}".format(ind))
print(f"Solar offset: {offset}")
print("Orbit start: {} Orbit end: {}".format(orbit[0].iso, orbit[1].iso))
print(f'Aim time: {midTime.iso} RA (deg): {sky_pos[0]:8.4f} Dec (deg): {sky_pos[1]:8.4f}')
print("")
test1 = SkyCoord(289.3792274160115, -22.304595055979675, unit = 'deg')
orb1 = SkyCoord(289.3855, -22.3051, unit = 'deg')
orb1.separation(test1)
import sunpy
sunpy.__version__
test1 = SkyCoord(289.898451566591, -22.158432904027155 , unit = 'deg')
orb1 = SkyCoord(289.9047, -22.1589, unit = 'deg')
orb1.separation(test1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the NuSTAR TLE archive.
Step2: Here is where we define the observing window that we want to use.
|
7,524
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from scipy.io import mmwrite  # assumed import for the mmwrite call below
HOME_DIR = 'd:/larc_projects/job_analytics/'; DATA_DIR = HOME_DIR + 'data/clean/'
RES_DIR = HOME_DIR + 'results/'
skill_df = pd.read_csv(DATA_DIR + 'skill_index.csv')
doc_skill = buildDocSkillMat(jd_docs, skill_df, folder=DATA_DIR)
with(open(DATA_DIR + 'doc_skill.mtx', 'w')) as f:
mmwrite(f, doc_skill)
extracted_skill_df = getSkills4Docs(docs=doc_index['doc'], doc_term=doc_skill, skills=skills)
df = pd.merge(doc_index, extracted_skill_df, left_index=True, right_index=True)
print(df.shape)
df.head()
df.to_csv(DATA_DIR + 'doc_index.csv') # later no need to extract skill again
reload(ja_helpers)
from ja_helpers import *
# load frameworks of SF as docs
pst_docs = pd.read_csv(DATA_DIR + 'SF/pst.csv')
pst_docs
pst_skill = buildDocSkillMat(pst_docs, skill_df, folder=None)
with(open(DATA_DIR + 'pst_skill.mtx', 'w')) as f:
mmwrite(f, pst_skill)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build feature matrix
Step2: Get skills in each JD
Step3: Extract features of new documents
|
7,525
|
<ASSISTANT_TASK:>
Python Code:
import requests
response = requests.get('https://api.spotify.com/v1/search?q=lil&type=artist&?country=US&limit=50')
data = response.json()
type(data)
data.keys()
data['artists'].keys()
artists = data['artists']['items']
for artist in artists:
print(artist['name'], artist['popularity'])
for artist in artists:
print (artist['name'], artist['popularity'])
if len(artist['genres']) != 0:
print(", ".join(artist['genres']))
else:
print ("No genres listed")
# AGGREGATION PROBLEM
all_genres = []
# THE LOOP
for artist in artists:
print ("ALL GENRES WE'VE HEARD OF:", all_genres)
# THE CONDITIONAL: none
print("Current artist has:", artist['genres'])
all_genres = all_genres + artist['genres']
print ("ALLL THE GENRES WE'VE HEARD OF, FINALLY, AT THE END:")
# Has repeats
print (all_genres)
# your_list = ['a', 'b', 'c', 'c', 'c']
# your_list.count('a') = 1
# your_list.count('c') = 3
# This is bad becuase of the duplicates
for genre in all_genres:
genre_count = all_genres.count(genre)
print(genre, "shows up", genre_count, "times")
# We need a UNIQUE list of all_genres, a.k.a. a list
# with all duplicates removed
# unique_list = set(list_with_duplicates)
unique_genres = set(all_genres)
for genre in unique_genres:
genre_count = all_genres.count(genre)
print(genre, "shows up", genre_count, "times")
# There is a library that comes with Python called COLLECTIONS!
# Inside of it is a magic thing alled Counter
# import collections
from collections import Counter
# all_genres = ['sourthern hip hop', 'souther hip hop', 'crunk', ]
counts = Counter(all_genres) #looks like dictionary # import collection -> collection.Counter
counts['crunk'] #so we can do this
counts.most_common(4)
most_popular_name = ""
most_popular_score = 0
for artist in artists:
print ("Looking at", artist['name'], "who has popularity score of", artist['popularity'])
# THE CONDITIONAL
# a.k.a. what you are testing
print ("Comparing", artist['popularity'], "to", most_popular_score)
if artist['popularity'] > most_popular_score:
print ("FOUND NEW MOST POPULAR, checking to see if it's Lil Wayne")
# Seeing if it's actually Lil Wayne
if artist['name'] == "Lil Wayne":
print ("Nice try Lil Wayne, we don't care")
# THE CHANGE
# a.k.a. what you're keeping track of
else:
print ("NEW POPULAR! Updating our notebook")
most_popular_name = artist['name']
most_popular_score = artist['popularity']
print ("+++++++++++++++++++++++")
print(most_popular_name, most_popular_score)
target_score = 72
# 1: INITIAL CONDITION
# We have no one who is in our list yet
second_best_artists = []
# AGGREGATION PROBLEM
# When you're looking through a series of serious objects
# and sometimes you want to add one of those objects
# to a DIFFERENT list
for artist in artists:
print ("Looking at", artist['name'], "who has a popularity of", artist['popularity'])
# 2: CONDITIONAL
# When we want to add someone to our list
if artist['popularity'] == 72:
print ("!!!!!! The artist's popularity is 72")
# 3: THE CHANGE
# Add that artist to our list
# .append(newthing) is how we do that in Python
second_best_artists.append(artist['name'])
print ("OUR SECOND BEST ARTISTS ARE:")
for artist_name in second_best_artists:
print(artist_name)
for artist in artists:
# print ("Looking at", artist['name'])
print
if artist['name'] == "Lil' Kim":
print ('Found Lil Kim')
print(artist['popularity'])
else:
pass
# print("Not Lil Kim")
lil_kim_popularity = 62
# AGGREGATION PROBLEM
more_popular_than_lil_kim = []
# THE LOOP
for artist in artists:
# THE CONDITIONAL is the artist more popular than lil kim?
if artist['popularity'] > lil_kim_popularity:
# IF yes, let's add them to our list
print (artist['name'], "is MORE POPULAR with a score of", artist['popularity'])
more_popular_than_lil_kim.append(artist['name'])
else:
print(artist['name'], "is less popular with a score of", artist['popularity'])
print ("++++++++++++++ MORE POPULAR THAN LIL KIM")
for artist_name in more_popular_than_lil_kim:
print (artist_name)
more_popular_string = ", ".join(more_popular_than_lil_kim)
print ("Artists more popular than Lil' Kim are:", more_popular_string)
response = requests.get('https://api.spotify.com/v1/search?q=lil&type=artist&?country=US&limit=50')
small_data = response.json()
small_data['artists']
print (len(small_data['artists']['items'])) # we only get 10 artists
print(data['artists']['total'])
import math
#response = requests.get('https://api.spotify.com/v1/search?q=lil&type=artist&limit=50')
#small_data = response.json()
page_count = math.ceil(4502/50)
page_count
list(range(0,91))
# First Page: artists 1-50
# 'https://api.spotify.com/v1/search?q=lil&type=artist&limit=50'
# Second Page: artists 51-100, offset of 50
# 'https://api.spotify.com/v1/search?q=lil&type=artist&limit=50&offset=50'
# Third Page: artists 101-150
# 'https://api.spotify.com/v1/search?q=lil&type=artist&limit=50&offset=100'
# Forth Page: artists 151-200
# 'https://api.spotify.com/v1/search?q=lil&type=artist&limit=50&offset=150'
all_artists = []
for page in range(91):
offset = (page*50)
print ("we are on page", page+1, "with an offset of", offset)
# make the request with a changed offset ?offset={offset}
# data = response.json()
# add our new artists to our list of existing artists
# all_artists = all_artists + data['artists']['items']
print ("successfully retrived", len(all_artists), "artists")
response = requests.get('')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2) What genres are most represented in the search results? Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed".
Step2: 3) Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating. Is it the same artist who has the largest number of followers?
Step3: 4) Print a list of Lil's that are more popular than Lil' Kim.
Step4: How to automate getting all of the results
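A hedged sketch of what the completed pagination loop could look like (the limit and offset parameters are the ones already used in the URLs above; when this notebook was written the search endpoint did not require an auth token, so no headers are shown):
all_artists = []
for page in range(page_count):
    offset = page * 50
    url = 'https://api.spotify.com/v1/search?q=lil&type=artist&limit=50&offset=' + str(offset)
    data = requests.get(url).json()
    all_artists = all_artists + data['artists']['items']
print("successfully retrieved", len(all_artists), "artists")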
|
7,526
|
<ASSISTANT_TASK:>
Python Code:
data_id = '17d'
ph_sel_name = "None"
data_id = "17d"
from fretbursts import *
sns = init_notebook()
import os
import pandas as pd
from IPython.display import display, Math
import lmfit
print('lmfit version:', lmfit.__version__)
figure_size = (5, 4)
default_figure = lambda: plt.subplots(figsize=figure_size)
save_figures = True
def savefig(filename, **kwargs):
if not save_figures:
return
import os
dir_ = 'figures/'
kwargs_ = dict(dpi=300, bbox_inches='tight')
#frameon=True, facecolor='white', transparent=False)
kwargs_.update(kwargs)
plt.savefig(dir_ + filename, **kwargs_)
print('Saved: %s' % (dir_ + filename))
PLOT_DIR = './figure/'
import matplotlib as mpl
from cycler import cycler
bmap = sns.color_palette("Set1", 9)
colors = np.array(bmap)[(1,0,2,3,4,8,6,7), :]
mpl.rcParams['axes.prop_cycle'] = cycler('color', colors)
colors_labels = ['blue', 'red', 'green', 'violet', 'orange', 'gray', 'brown', 'pink', ]
for c, cl in zip(colors, colors_labels):
locals()[cl] = tuple(c) # assign variables with color names
sns.palplot(colors)
data_dir = './data/multispot/'
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(glob(data_dir + '*_?.hdf5'))
labels = ['12d', '7d', '17d', '22d', '27d', 'DO']
files_dict = {lab: fname for lab, fname in zip(sorted(labels), file_list)}
files_dict
_fname = 'results/Multi-spot - leakage coefficient KDE wmean DexDem.csv'
leakageM = np.loadtxt(_fname, ndmin=1)
print('Leakage coefficient:', leakageM)
_fname = 'results/usALEX - direct excitation coefficient dir_ex_t beta.csv'
dir_ex_tM = np.loadtxt(_fname, ndmin=1)
print('Direct excitation coefficient (dir_ex_t):', dir_ex_tM)
_fname = 'results/Multi-spot - gamma factor.csv'
gammaM = np.loadtxt(_fname, ndmin=1)
print('Multispot gamma coefficient:', gammaM)
_fname = 'results/usALEX - leakage coefficient DexDem.csv'
leakageA = np.loadtxt(_fname)
print('usALEX Leakage coefficient:', leakageA)
_fname = 'results/usALEX - gamma factor - all-ph.csv'
gammaA = np.loadtxt(_fname)
print('usALEX Gamma-factor:', gammaA)
_fname = 'results/usALEX - beta factor - all-ph.csv'
betaA = np.loadtxt(_fname)
print('usALEX Gamma-factor:', betaA)
_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'
dir_ex_aa = np.loadtxt(_fname)
print('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)
dir_ex_tA = betaA * dir_ex_aa
dir_ex_tA
donor_ref = False # False -> gamma correction is: g*nd + na
# True -> gamma correction is: nd + na/g
hist_weights = 'size'
## Background fit parameters
bg_kwargs_auto = dict(fun=bg.exp_fit,
time_s = 30,
tail_min_us = 'auto',
F_bg=1.7,
)
## Burst search
F=6
dither = False
size_th = 30 # Burst size threshold (selection on corrected burst sizes)
## FRET fit parameters
bandwidth = 0.03 # KDE bandwidth
E_range = {'7d': (0.7, 1.0), '12d': (0.4, 0.8), '17d': (0.2, 0.4),
'22d': (0.0, 0.1), '27d': (0.0, 0.1), 'DO': (0.0, 0.1)}
E_axis_kde = np.arange(-0.2, 1.2, 0.0002)
def print_fit_report(E_pr, gamma=1, leakage=0, dir_ex_t=0, math=True):
"""Print fit and standard deviation for both corrected and uncorrected E.
Returns d.E_fit.
"""
E_corr = fretmath.correct_E_gamma_leak_dir(E_pr, gamma=gamma, leakage=leakage, dir_ex_t=dir_ex_t)
E_pr_mean = E_pr.mean()*100
E_pr_delta = (E_pr.max() - E_pr.min())*100
E_corr_mean = E_corr.mean()*100
E_corr_delta = (E_corr.max() - E_corr.min())*100
if math:
display(Math(r'\text{Pre}\;\gamma\quad\langle{E}_{fit}\rangle = %.1f\%% \qquad'
'\Delta E_{fit} = %.2f \%%' % \
(E_pr_mean, E_pr_delta)))
display(Math(r'\text{Post}\;\gamma\quad\langle{E}_{fit}\rangle = %.1f\%% \qquad'
'\Delta E_{fit} = %.2f \%%' % \
(E_corr_mean, E_corr_delta)))
else:
print('Pre-gamma E (delta, mean): %.2f %.2f' % (E_pr_mean, E_pr_delta))
print('Post-gamma E (delta, mean): %.2f %.2f' % (E_corr_mean, E_corr_delta))
d = loader.photon_hdf5(files_dict[data_id])
d.calc_bg(**bg_kwargs_auto)
d.burst_search(m=10, F=F, dither=dither)
d.time_max
ds = Sel(d, select_bursts.size, th1=30, gamma=gammaM, donor_ref=donor_ref)
ds.num_bursts
# fitter = bext.bursts_fitter(ds)
# fitter.histogram(bins=np.r_[-0.2 : 1.2 : bandwidth])
# fitter.model = mfit.factory_two_gaussians(add_bridge=False, p2_center=0.4)
# fitter.fit_histogram()
# display(fitter.params['p2_center'])
# print_fit_report(fitter.params['p2_center'], gamma=gammaM, leakage=leakageM, dir_ex_t=dir_ex_tM)
dplot(ds, hist_fret);
#show_model=True, show_fit_stats=True, fit_from='p2_center', show_fit_value=True);
d_all = ds.collapse()
d_all_chunk = Sel(d_all, select_bursts.time, time_s2=600/8)
dplot(d_all_chunk, hist_fret)
Eraw = d_all_chunk.E[0]
E = fretmath.correct_E_gamma_leak_dir(Eraw, gamma=gammaM, leakage=leakageM, dir_ex_t=dir_ex_tM)
sns.set_style('whitegrid')
%config InlineBackend.figure_format='retina' # for hi-dpi displays
plt.hist(E, bins=np.arange(-0.2, 1.2, 0.025) + 0.5*0.025);
bursts_usalex = pd.read_csv('results/bursts_usALEX_{sample}_{ph_sel}_F{F:.1f}_m{m}_size{th}.csv'
.format(sample=data_id, ph_sel='Dex', m=10, th=30, F=7), index_col=0)
bursts_usalex
Eraw_alex = bursts_usalex.E
E_alex = fretmath.correct_E_gamma_leak_dir(Eraw_alex, gamma=gammaA, leakage=leakageA, dir_ex_t=dir_ex_tA)
kws = dict(bins=np.arange(-0.2, 1.2, 0.025) + 0.5*0.025, histtype='step', lw=1.8)
plt.hist(E, label='Multispot', **kws)
plt.hist(E_alex, label='μs-ALEX', **kws)
plt.legend(loc=2)
plt.title('Sample %s: Multispot vs μs-ALEX comparison' % data_id)
plt.xlabel('FRET Efficiency')
plt.ylabel('# Bursts');
savefig('Multispot vs usALEX FRET hist comp sample %s' % data_id)
kws = dict(bins=np.arange(-0.2, 1.2, 0.025) + 0.5*0.025, histtype='step', lw=1.8, normed=True)
plt.hist(E, label='Multispot', **kws)
plt.hist(E_alex, label='μs-ALEX', **kws)
plt.legend(loc=2)
plt.title('Sample %s: Multispot vs μs-ALEX comparison' % data_id)
plt.xlabel('FRET Efficiency')
plt.ylabel('Probabiltity');
savefig('Multispot vs usALEX FRET hist comp sample %s normed' % data_id)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multi-spot vs usALEX FRET histogram comparison
Step2: 8-spot paper plot style
Step3: Data files
Step4: Check that the folder exists
Step5: List of data files in data_dir
Step6: Correction parameters
Step7: Load the multispot direct excitation coefficient ($d_{dirT}$) from disk (computed in usALEX - Corrections - Direct excitation physical parameter)
Step8: Load the multispot gamma ($\gamma_M$) coefficient (computed in Multi-spot Gamma Fitting)
Step9: usALEX
Step10: Load the usALEX gamma coefficient (computed in usALEX - Corrections - Gamma factor fit)
Step11: Load the usALEX beta coefficient (computed in usALEX - Corrections - Gamma factor fit)
Step12: Load the usALEX direct-excitation coefficient ($d_{exAA}$) (computed in usALEX - Corrections - Direct excitation fit)
Step13: Compute usALEX direct-excitation coefficient ($d_{exT}$) (see usALEX - Corrections - Direct excitation physical parameter)
Step14: Parameters
Step16: Utility functions
Step17: Multispot analysis
Step18: Comparison with usALEX
|
7,527
|
<ASSISTANT_TASK:>
Python Code:
a = 243748.890365
b = 501771.703058 - 243748.890365
c = 752464.582 - 501771.703058
d = 981305.261623 - 752464.582
e = 1.175989e+06 - 981305.261623
ghi_CaseA_Boulder = [a, b, c, d, e]
ghi_CaseA_Boulder
epwfile = r'C:\Users\sayala\Documents\GitHub\internStuff\weatherFiles\USA_CO_Boulder-Broomfield-Jefferson.County.AP.724699_TMY3.epw'
epwfile2 = r'C:\Users\sayala\Documents\GitHub\bifacial_radiance\bifacial_radiance\TEMP\PuertoRico\EPWs\PRI_Mercedita.AP.785203_TMY3.epw'
import bifacial_radiance
testfolder = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\Routine1'
demo = bifacial_radiance.RadianceObj('test', testfolder)
metdata = demo.readWeatherFile(epwfile)
# from HPC Compiling printout
boulder_frontirrad = [239565.890906, 252806.845632, 245879.088559, 223025.93448100006, 188855.24042199994]
metdata.datetime[8758]
starts = [2881, 3626, 4346, 5090, 5835]
ends = [3621, 4341, 5085, 5829, 6550]
ghi_Boulder = []
for ii in range(0, len(starts)):
start = starts[ii]
end = ends[ii]
ghi_Boulder.append(metdata.ghi[start:end].sum())
ghi_Boulder
metdata = demo.readWeatherFile(epwfile2)
metdata.datetime[8758]
starts = [2881, 3626, 4346, 5090, 5835]
ends = [3621, 4341, 5085, 5829, 6550]
ghi_PR=[]
for ii in range(0, len(starts)):
start = starts[ii]
end = ends[ii]
ghi_PR.append(metdata.ghi[start:end].sum())
ghi_PR
#Main Variables needed throughout
albedo = 0.6
sim_general_name = 'bifacial_example'
lat = 37.5
lon = -77.6
epwfile = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\USA_VA_Richmond.Intl.AP.724010_TMY.epw'
testfolder = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\Routine1'
moduletype = 'Prism Solar Bi60 landscape'
tilt = 10
pitch = 3
clearance_height = 0.2
azimuth = 90
nMods = 20
nRows = 7
hpc = True
import bifacial_radiance
# Note: idx is the hourly index for this simulation; it is assumed to be provided externally (e.g. by the HPC job script).
sim_name = sim_general_name+'_'+str(idx)
demo = bifacial_radiance.RadianceObj(sim_name,str(testfolder))
demo.setGround(albedo)
metdata = demo.readWeatherFile(epwfile)
demo.gendaylit(idx)
sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name)
octfile = demo.makeOct(octname = demo.basename , hpc=hpc)
analysis = bifacial_radiance.AnalysisObj(octfile=octfile, name=sim_name)
frontscan, backscan = analysis.moduleAnalysis(scene=scene)
analysis.analysis(octfile, name=sim_name, frontscan=frontscan, backscan=backscan)
tilt = 30
#Main Variables needed throughout
albedo = 0.6
sim_general_name = 'bifacial_example'
lat = 37.5
lon = -77.6
epwfile = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\USA_VA_Richmond.Intl.AP.724010_TMY.epw'
testfolder = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\Routine1'
moduletype = 'Prism Solar Bi60 landscape'
tilt = 10
pitch = 3
clearance_height = 0.2
azimuth = 90
nMods = 20
nRows = 7
hpc = True
import bifacial_radiance
sim_name = sim_general_name+'_'+str(tilt)
demo = bifacial_radiance.RadianceObj(sim_name,str(testfolder))
demo.setGround(albedo)
metdata = demo.readWeatherFile(epwfile)
demo.genCumSky(savefile = sim_name)
sceneDict = {'tilt':tilt,'pitch':pitch,'clearance_height':clearance_height,'azimuth':azimuth, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(moduletype=moduletype,sceneDict=sceneDict, hpc=hpc, radname = sim_name)
octfile = demo.makeOct(octname = demo.basename , hpc=hpc)
analysis = bifacial_radiance.AnalysisObj(octfile=octfile, name=sim_name)
frontscan, backscan = analysis.moduleAnalysis(scene=scene)
analysis.analysis(octfile, name=sim_name, frontscan=frontscan, backscan=backscan)
# ADD LATER
#Main Variables needed throughout
albedo = 0.6
sim_general_name = 'bifacial_example'
lat = 37.5
lon = -77.6
epwfile = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\USA_VA_Richmond.Intl.AP.724010_TMY.epw'
testfolder = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\Routine1'
moduletype = 'Prism Solar Bi60 landscape'
hub_height = 0.2
nMods = 20
nRows = 7
hpc = True
limit_angle = 60
backtrack = True
gcr = 0.35
angledelta = 0.01
starttime = '21_11_06_10'# 'YY_MM_DD_HH'
endtime = starttime
cumulativesky = False
import bifacial_radiance
sim_name = sim_general_name + starttime
demo = bifacial_radiance.RadianceObj(sim_name, path=testfolder)
demo.setGround(albedo)
metdata = demo.readWeatherFile(epwfile, coerce_year=2021, daydate='05_01')
sceneDict = {'gcr':gcr,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows}
trackerdict = demo.set1axis(limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky)
metdata = demo.readWeatherFile(epwfile, coerce_year=2021, daydate='21_05_22')
sceneDict = {'gcr':gcr,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows}
trackerdict = demo.set1axis(limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky)
foodict = {k: v for k, v in trackerdict.items() if k.startswith('21_'+day_date)}
foodict
enddate
import datetime as dt
startindex = list(metdata.datetime).index(dt.datetime.strptime(startdate,'%y_%m_%d_%H'))
startindex
startdate = list(foodict.keys())[0][:-3]
enddate = list(foodict.keys())[-1][:-3]
trackerdict = demo.gendaylit1axis(trackerdict = foodict, startdate = startdate, enddate = enddate, hpc=True)
trackerdict
trackerdict = demo.makeScene1axis(moduletype=moduletype,sceneDict=sceneDict, cumulativesky=cumulativesky, hpc=hpc) #makeScene creates a .rad file with 20 modules per row, 7 rows.
trackerdict = demo.makeOct1axis(customname = sim_name, hpc=hpc)
demo.analysis1axis(customname = sim_name, hpc=hpc)
#Main Variables needed throughout
albedo = 0.6
sim_general_name = 'bifacial_example'
lat = 37.5
lon = -77.6
epwfile = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\USA_VA_Richmond.Intl.AP.724010_TMY.epw'
testfolder = r'C:\Users\sayala\Documents\RadianceScenes\HPC_Test\Routine1'
moduletype = 'Prism Solar Bi60 landscape'
hub_height = 0.2
nMods = 20
nRows = 7
hpc = True
limit_angle = 60
backtrack = True
gcr = 0.35
angledelta = 0.01
starttime = '2021_11_06_10'# 'YY_MM_DD_HH'
endtime = starttime
cumulativesky = True
theta = str(20)
import bifacial_radiance
sim_name = sim_general_name + theta
demo = bifacial_radiance.RadianceObj(sim_name, path=testfolder)
demo.setGround(albedo)
metdata = demo.readWeatherFile(epwfile, starttime=starttime, endtime=endtime, coerce_year=2021)
sceneDict = {'gcr':gcr,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows}
trackerdict =demo.set1axis(limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky)
trackerdict = demo.genCumSky1axis()
trackerdict = demo.makeScene1axis(trackerdict = trackerdict[theta], moduletype=moduletype,sceneDict=sceneDict, cumulativesky=cumulativesky, hpc=hpc) #makeScene creates a .rad file with 20 modules per row, 7 rows.
trackerdict = demo.makeOct1axis(customname = sim_name, hpc=hpc)
demo.analysis1axis(customname = sim_name, hpc=hpc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: B. Gencumsky
Step2: C. Option
Step3: <a id='step2'></a>
Step4: GencumSky1axis, looping over tracker_angles
|
7,528
|
<ASSISTANT_TASK:>
Python Code:
import sklearn
import numpy
import chaospy
samples = numpy.linspace(0, 5, 50)
numpy.random.seed(1000)
noise = chaospy.Normal(0, 0.1).sample(50)
evals = numpy.sin(samples) + noise
from matplotlib import pyplot
pyplot.rc("figure", figsize=[15, 6])
pyplot.scatter(samples, evals)
pyplot.show()
q0 = chaospy.variable()
expansion = chaospy.polynomial([1, q0, q0**2, q0**3])
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals)
pyplot.scatter(samples, evals)
pyplot.plot(samples, fitted_polynomial(samples))
pyplot.show()
fitted_polynomial.round(4)
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=False)
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals, model=model)
pyplot.scatter(samples, evals)
pyplot.plot(samples, fitted_polynomial(samples))
pyplot.show()
fitted_polynomial.round(4)
from sklearn import linear_model as lm
kws = {"fit_intercept": False}
univariate_models = {
"least squares": lm.LinearRegression(**kws),
"elastic net": lm.ElasticNet(alpha=0.1, **kws),
"lasso": lm.Lasso(alpha=0.1, **kws),
"lasso lars": lm.LassoLars(alpha=0.1, **kws),
"lars": lm.Lars(**kws),
"orthogonal matching pursuit":
lm.OrthogonalMatchingPursuit(n_nonzero_coefs=3, **kws),
"ridge": lm.Ridge(alpha=0.1, **kws),
"bayesian ridge": lm.BayesianRidge(**kws),
"auto relevant determination": lm.ARDRegression(**kws),
}
for label, model in univariate_models.items():
fitted_polynomial = chaospy.fit_regression(
expansion, samples, evals, model=model)
pyplot.plot(samples, fitted_polynomial(samples))
pyplot.scatter(samples, evals)
pyplot.show()
from problem_formulation import (
model_solver, joint, error_in_mean, error_in_variance)
multivariate_models = {
"least squares": lm.LinearRegression(**kws),
"elastic net": lm.MultiTaskElasticNet(alpha=0.2, **kws),
"lasso": lm.MultiTaskLasso(alpha=0.2, **kws),
"lasso lars": lm.LassoLars(alpha=0.2, **kws),
"lars": lm.Lars(n_nonzero_coefs=3, **kws),
"orthogonal matching pursuit": \
lm.OrthogonalMatchingPursuit(n_nonzero_coefs=3, **kws),
"ridge": lm.Ridge(alpha=0.2, **kws),
}
import warnings
warnings.filterwarnings("ignore", category=RuntimeWarning)
expansion = chaospy.generate_expansion(2, joint)
samples = joint.sample(50)
evals = numpy.array([model_solver(sample) for sample in samples.T])
for label, model in multivariate_models.items():
fitted_polynomial, coeffs = chaospy.fit_regression(
expansion, samples, evals, model=model, retall=True)
self_evals = fitted_polynomial(*samples)
error_mean_ = error_in_mean(chaospy.E(fitted_polynomial, joint))
error_var_ = error_in_variance(chaospy.Var(fitted_polynomial, joint))
count_non_zero = numpy.sum(numpy.any(coeffs, axis=-1))
print(f"{label:<30} {error_mean_:.5f} " +
f"{error_var_:.5f} {count_non_zero}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As an example to follow, consider the following artificial case
Step2: Least squares regression
Step3: Least squares regression is also supported by sklearn. So it is possible to get the same result using the LinearRegression model. For example
Step4: It is important to note that sklearn often does extra operations that may interfere with the compatibility of chaospy. Here fit_intercept=False ensures that an extra columns isn't added needlessly. An error will be raised if this is forgotten.
Step5: Again, as the polynomials already addresses the constant term, it is important to remember to include fit_intercept=False for each model.
Step6: Multi-variable regression
Step7: The methods that support multi-label dimensional responses are
Step8: To illustrate the difference between the methods, we do the simple error analysis
|
7,529
|
<ASSISTANT_TASK:>
Python Code:
# access yelp.csv using a relative path
import pandas as pd
yelp = pd.read_csv('/GA-SEA-DAT2/data/yelp.csv')
yelp.head(1)
# read the data from yelp.json into a list of rows
# each row is decoded into a dictionary named "data" using using json.loads()
import json
import pandas as pd
with open('../data/yelp.json', 'rU') as f:
data = [json.loads(row) for row in f]
# show the first review
data[0]
# convert the list of dictionaries to a DataFrame
df = pd.DataFrame.from_dict(data, orient='columns')
print(df)
# add DataFrame columns for cool, useful, and funny
df['cool'] = [row['votes']['cool'] for row in data]
df['useful'] = [row['votes']['useful'] for row in data]
df['funny'] = [row['votes']['funny'] for row in data]
# drop the votes column and then display the head
df.drop('votes', axis=1, inplace=True)
# treat stars as a categorical variable and look for differences between groups by comparing the means of the groups
df.groupby('stars').mean()
# display a correlation matrix of the vote types (cool/useful/funny) and stars
%matplotlib inline
import seaborn as sns
sns.heatmap(df.corr())
# display multiple scatter plots (cool, useful, funny) with linear regression line
sns.lmplot(x='cool', y='stars', data=df, ci=95, fit_reg=True)
sns.plt.xlim(-1, 90)
sns.plt.ylim(-1, 10)
sns.lmplot(x='funny', y='stars', data=df, ci=95, fit_reg=True)
sns.plt.xlim(-1, 90)
sns.plt.ylim(-1, 10)
sns.lmplot(x='useful', y='stars', data=df, ci=95, fit_reg=True)
sns.plt.xlim(-1, 90)
sns.plt.ylim(-1, 10)
#feature matrix
feature_cols = ['cool', 'useful', 'funny']
X = df[feature_cols]
#response vector
response_vector = ['stars']
y = df[response_vector]
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X, y)
print reg.intercept_
print reg.coef_
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import numpy as np
# define a function that accepts a list of features and returns testing RMSE
def rmse_train_test(feature_cols):
X = df[feature_cols]
y = df[response_vector]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=777)
reg = LinearRegression()
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
return np.sqrt(metrics.mean_squared_error(y_test, y_pred))
# calculate RMSE with all three features
#rmse_train_test(feature_cols)
#alt
rmse_train_test(['cool', 'useful', 'funny'])
print("RMSE for funny feature:")
print(rmse_train_test(['funny']))
print("RMSE for cool feature:")
print(rmse_train_test(['cool']))
print("RMSE for useful feature:")
print(rmse_train_test(['useful']))
#cool and useful seem to be the features that work best
print("RMSE for cool and useful feature:")
print(rmse_train_test(['cool', 'useful']))
print("RMSE for cool and funny feature:")
print(rmse_train_test(['cool', 'funny']))
print("RMSE for funny and useful feature:")
print(rmse_train_test(['funny', 'useful']))
print("RMSE for all feature:")
print(rmse_train_test(['cool', 'useful', 'funny']))
# new feature:
df['sum_votes'] = df['cool'] + df['useful'] + df['funny']
# new features:
feature_cols = ['cool', 'useful', 'funny', 'sum_votes']
X = df[feature_cols]
# add new features to the model and calculate RMSE
print("RMSE for total votes:")
print(rmse_train_test(['sum_votes']))
#test train split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=777)
# NUMPY array with the same shape as y_test
y_null = np.zeros_like(y_test, dtype=float)
# fill the array with the mean value of y_test (~3.77 stars)
y_null = y_null + y_test.values.mean()
# compute null RMSE
np.sqrt(metrics.mean_squared_error(y_test, y_null))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 1 (Bonus)
Step2: Task 2
Step3: Task 3
Step4: Task 4
Step5: Task 5
Step6: Task 6
Step7: Task 7 (Bonus)
Step8: Task 8 (Bonus)
|
7,530
|
<ASSISTANT_TASK:>
Python Code:
import random
with open('rt-polarity.neg.utf8', 'r') as f:
negative_list = ['-1 '+i for i in f]
with open("rt-polarity.pos.utf8", "r") as f:
positive_list = ["+1 " + i for i in f]
#for sentence in temp:
# positive_list.append('+1 '+"".join([i.encode('replace') for i in sentence]))
concatenate = positive_list + negative_list
random.shuffle(concatenate)
with open('sentiment.txt', 'w') as f:
f.write("".join(concatenate))
from nltk.corpus import stopwords
stopwords_list = [s for s in stopwords.words('english')]
print(stopwords_list)
from nltk.stem.porter import PorterStemmer
def feature(sentence):
porter = PorterStemmer()
result = []
label = sentence[0:2]
for s in sentence[3:].split(' '):
try:
result.append(porter.stem(s))
except KeyError:
pass
return (label + " " + " ".join(result))
feature("+1 intensely romantic , thought-provoking and even an engaging mystery . ")
# import passages to construct logistic regression and learn the model.
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
tfv = TfidfVectorizer(encoding='utf-8', lowercase=True,
stop_words=ENGLISH_STOP_WORDS,
#token_pattern='(?u)\b\w\w+\b',
ngram_range=(1, 2))
with open('sentiment.txt') as f:
features = [(s[:2], s[3:]) for s in f]
# make label list
label = [i[0] for i in features]
# make sentence list that is removed English Stop Words
sentence = []
for i in features:
temp = i[1].split(' ')
temp2 = [i+' ' for i in temp]
s = "".join(temp2)
sentence.append(s)
tfv_vector = tfv.fit_transform("".join(sentence).split(' '))
tfv_vector
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 71. Stop words
Step2: 72. Feature extraction
Step3: No.73
Step4: The argument to TfidfVectorizer.fit() is a word "list"
|
7,531
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
x = np.array([-2+1j, -1.4, -1.1, 0, 1.2, 2.2+2j, 3.1, 4.4, 8.3, 9.9, 10+0j, 14, 16.2])
result = x[x.imag !=0]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,532
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import holoviews as hv
%reload_ext holoviews.ipython
x,y = np.mgrid[-50:51, -50:51] * 0.1
image = hv.Image(np.sin(x**2+y**2), group="Function", label="Sine")
coords = [(0.1*i, np.sin(0.1*i)) for i in range(100)]
curve = hv.Curve(coords)
curves = {phase: hv.Curve([(0.1*i, np.sin(phase+0.1*i)) for i in range(100)])
for phase in [0, np.pi/2, np.pi, np.pi*3/2]}
waves = hv.HoloMap(curves)
layout = image + curve
renderer = hv.Store.renderers['matplotlib'].instance(fig='svg', holomap='gif')
renderer.save(layout, 'example_I')
from IPython.display import SVG
SVG(filename='example_I.svg')
hv.help(image, visualization=False)
hv.help(image)
hv.Store.add_style_opts(hv.Image, ['filternorm'])
# To check that it worked:
RasterPlot = renderer.plotting_class(hv.Image)
print(RasterPlot.style_opts)
RasterPlot.colorbar=True
RasterPlot.set_param(show_title=False,show_frame=True)
renderer.save(layout, 'example_II', style=dict(Image={'cmap':'Blues'}),
plot= dict(Image={'yaxis':None}))
SVG(filename='example_II.svg')
options={'Image.Function.Sine': {'plot':dict(fig_size=50), 'style':dict(cmap='jet')}}
renderer.save(layout, 'example_III',options=options)
SVG(filename='example_III.svg')
green_sine = image(style={'cmap':'Greens'})
green_sine
with hv.StoreOptions.options(green_sine, options={'Image':{'style':{'cmap':'Reds'}}}):
data, info = renderer(green_sine)
print(info)
SVG(data)
%%opts Curve style(linewidth=8) Image style(interpolation='bilinear') plot[yaxis=None] norm{+framewise}
layout
from holoviews.ipython.parser import OptsSpec
renderer.save(image + waves, 'example_V',
options=OptsSpec.parse("Image (cmap='gray')"))
%%output info=True
curve
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Rendering and saving objects from Python
Step2: We could instead have used the default Store.renderer, but that would have been PNG format. Using this renderer, we can save any HoloViews object as SVG or GIF
Step3: That's it! The renderer builds the figure in matplotlib, renders it to SVG, and saves that to "example_I.svg" on disk. Everything up to this point would have worked the same in IPython or in regular Python, even with no display available. But since we're in IPython Notebook at the moment, we can check whether the exporting worked
Step4: You can use this workflow for generating HoloViews visualizations directly from Python, perhaps as a part of a set of scripts that you run automatically, e.g. to put your results up on a web server as soon as data is generated. But so far, this plot just uses all the default options, with no customization. How can we change how the plot will appear when we render it?
Step5: This information can be useful, but we have explicitly suppressed information regarding the visualization parameters -- these all report metadata about your data, not about anything to do with plotting directly. That's because the normal HoloViews components have nothing to do with plotting; they are just simple containers for your data and a small amount of metadata.
Step6: Supported style options
Step7: Changing plot options at the class level
Step8: Here .set_param() allows you to set multiple parameters conveniently, but it works the same as the single-parameter .colorbar example above it. Setting these values at the class level affects all previously created and to-be-created plotting objects of this type, unless specifically overridden via Store as described below.
Step9: As before, the SVG call is simply to display it here in the notebook; the actual image is saved on disk and then loaded back in here for display.
Step10: Here you can see that the y axis has returned, because our previous setting to turn it off was just for the call to renderer.save. But we still have a colorbar, because that parameter was set at the class level, for all future plots of this type. Note that this form of option setting, while more verbose, accepts the full {type}[.{group}[.{label}]] syntax, like 'Image.Function.Sine' or 'Image.Function', while the shorter keyword approach above only supports the class, like 'Image'.
Step11: Here we could save the object to SVG just as before, but in this case we can skip a step and simply view it directly in the notebook
Step12: Both IPython notebook and renderer.save() use the same mechanisms for keeping track of the options, so they will give the same results. Specifically, what happens when you "bind" a set of options to an object is that there is an integer ID stored in the object (green_sine in this case), and a corresponding entry with that ID is stored in a database of options called an OptionTree (kept in holoviews.core.options.Store). The object itself is otherwise unchanged, but then if that object is later used in another container, etc. it will retain its ID and therefore its customization. Any customization stored in an OptionTree will override any class attribute defaults set like RasterGridPlot.border=5 above. This approach lets HoloViews keep track of any customizations you want to make, without ever affecting your actual data objects.
Step13: Here the result is red, because it was rendered within the options context above, but were we to render the green_sine again it would still be green; the options are applied only within the scope of the with statement.
Step14: The %%opts magic works like the pure-Python option for associating options with an object, except that it works on the item in the IPython cell, and it affects the item directly rather than making a copy or applying only in scope. Specifically, it assigns a new ID number to the object returned from this cell, and makes a new OptionTree containing the options for that ID number.
Step15: There is also a special IPython syntax for listing the visualization options for a plotting object in a pop-up window that is equivalent to calling holoviews.help(object)
|
7,533
|
<ASSISTANT_TASK:>
Python Code:
%xmode Minimal
from larray import *
from larray import __version__
__version__
s = 1 + 2
# In the interactive mode, there is no need to use the print() function
# to display the content of the variable 's'.
# Simply typing 's' is enough
s
# In the interactive mode, there is no need to use the print() function
# to display the result of an expression
1 + 2
# define some axes to be used later
age = Axis('age=0-9,10-17,18-66,67+')
gender = Axis('gender=female,male')
time = Axis('time=2015..2017')
# define some data. This is the belgian population (in thousands). Source: eurostat.
data = [[[633, 635, 634],
[663, 665, 664]],
[[484, 486, 491],
[505, 511, 516]],
[[3572, 3581, 3583],
[3600, 3618, 3616]],
[[1023, 1038, 1053],
[756, 775, 793]]]
# create an Array object
population = Array(data, axes=[age, gender, time])
population
# attach some metadata to the population array
population.meta.title = 'population by age, gender and year'
population.meta.source = 'Eurostat'
# display metadata
population.meta
# Array summary: metadata + dimensions + description of axes
population.info
zeros([age, gender])
ones([age, gender])
full([age, gender], fill_value=10.0)
sequence(age)
ndtest([age, gender])
# save our population array to a CSV file
population.to_csv('population_belgium.csv')
population = read_csv('population_belgium.csv')
population
population['67+', 'female', 2017]
population[2017, 'female', '67+']
population['female']
population['female', ['0-9', '10-17']]
# in this case '10-17':'67+' is equivalent to ['10-17', '18-66', '67+']
population['female', '10-17':'67+']
# :'18-66' selects all labels between the first one and '18-66'
# 2017: selects all labels between 2017 and the last one
population[:'18-66', 2017:]
country = Axis(['Belgium', 'Netherlands', 'Germany'], 'country')
citizenship = Axis(['Belgium', 'Netherlands', 'Germany'], 'citizenship')
immigration = ndtest((country, citizenship, time))
immigration
immigration[country['Netherlands'], citizenship['Belgium'], 2017]
for year in time:
print(year)
population
population.sum(gender)
population.sum(age, gender)
population.sum_by(time)
children = age['0-9', '10-17']
children
working = age['18-66'] >> 'working'
working
nonworking = age['0-9', '10-17', '67+'] >> 'nonworking'
nonworking
population
population[working]
population[nonworking]
population.sum(nonworking)
population.sum((children, working, nonworking))
population = zeros([age, gender, time])
births = zeros([age, gender, time])
deaths = zeros([age, gender, time])
# create a session containing the arrays of the model
demography_session = Session(population=population, births=births, deaths=deaths)
# get an array (option 1)
demography_session['population']
# get an array (option 2)
demography_session.births
# modify an array
demography_session.deaths['male'] = 1
# add an array
demography_session.foreigners = zeros([age, gender, time])
# displays names of arrays contained in the session
# (in alphabetical order)
demography_session.names
# dump all arrays contained in demography_session in one HDF5 file
demography_session.save('demography.h5')
# load all arrays saved in the HDF5 file 'demography.h5' and store them in the 'demography_session' variable
demography_session = Session('demography.h5')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To know the version of the LArray library installed on your machine, type
Step2: <div class="alert alert-warning">
Step3: Create an array
Step4: The labels allow to select subsets and to manipulate the data without working with the positions
Step5: You can optionally attach some metadata to an array
Step6: To get a short summary of an array, type
Step7: Create an array filled with predefined values
Step8: Save/Load an array
Step9: The content of the CSV file is then
Step10: Other input/output functions are described in the Input/Output section of the API documentation.
Step11: Labels can be given in arbitrary order
Step12: When selecting a larger subset the result is an array
Step13: When selecting several labels for the same axis, they must be given as a list (enclosed by [ ])
Step14: You can also select slices, which are all labels between two bounds (we usually call them the start and stop bounds)
Step15: <div class="alert alert-info">
Step16: If we try to get the number of Belgians living in the Netherlands for the year 2017, we might try something like an unqualified label selection (see the sketch after this list)
Step17: Iterating over an axis
Step18: Aggregation
Step19: We can sum along the 'gender' axis using
Step20: Or sum along both 'age' and 'gender'
Step21: It is sometimes more convenient to aggregate along all axes except some. In that case, use the aggregation functions ending in _by (e.g. sum_by)
Step22: Groups
Step23: It is often useful to attach an explicit name to them using the >> operator
Step24: Still using the same population array
Step25: Groups can be used in selections
Step26: or aggregations
Step27: When aggregating several groups, the names we set above using >> determines the label on the aggregated axis.
Step28: Grouping arrays in a Session
Step29: One of the main interests of using sessions is to save and load many arrays at once
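A small sketch expanding on Step16 (my illustration, not from the original tutorial): because the label 'Belgium' exists on both the country and the citizenship axes of the immigration array, an unqualified selection is ambiguous, so each label has to be qualified with its axis.
# Ambiguous: 'Belgium' is a label on two different axes, so this is expected to fail
# immigration['Belgium', 'Netherlands', 2017]
# Unambiguous: name the axis each label belongs to
immigration[country['Netherlands'], citizenship['Belgium'], 2017]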
|
7,534
|
<ASSISTANT_TASK:>
Python Code:
# Question 1
# Question 2
# Question 3.1
# Question 3.2
# Question 3.3
# Initialize parameters for the simulation (A, s, T, delta, alpha, g, n, K0, A0, L0)
# Initialize a variable called tfp as a (T+1)x1 array of zeros and set first value to A0
# Compute all subsequent tfp values by iterating over t from 0 through T
# Plot the simulated tfp series
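# --- Illustrative sketch (an assumption, not the official solution): one way to fill in
# --- the two steps above, using hypothetical parameter values rather than the ones
# --- chosen in the parameters cell.
import numpy as np
import matplotlib.pyplot as plt
T_example, g_example, A0_example = 100, 0.02, 1.0  # hypothetical values
tfp_example = np.zeros(T_example + 1)
tfp_example[0] = A0_example
for t in range(T_example):
    tfp_example[t + 1] = (1 + g_example) * tfp_example[t]  # A_{t+1} = (1+g) * A_t
plt.plot(tfp_example)
plt.title('Simulated TFP (illustrative sketch)')
plt.show()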
# Initialize a variable called labor as a (T+1)x1 array of zeros and set first value to L0
# Compute all subsequent labor values by iterating over t from 0 through T
# Plot the simulated labor series
# Initialize a variable called capital as a (T+1)x1 array of zeros and set first value to K0
# Compute all subsequent capital values by iterating over t from 0 through T
# Plot the simulated capital series
# Store the simulated capital, labor, and tfp data in a pandas DataFrame called data
# Print the first 5 frows of the DataFrame
# Create columns in the DataFrame to store computed values of the other endogenous variables: Y, C, and I
# Print the first five rows of the DataFrame
# Create columns in the DataFrame to store capital per worker, output per worker, consumption per worker, and investment per worker
# Print the first five rows of the DataFrame
# Create a 2x2 grid of plots of capital, output, consumption, and investment
# Create a 2x2 grid of plots of capital per worker, output per worker, consumption per worker, and investment per worker
# Question 5.1
# Question 5.2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 2
Step2: Question 3
Step3: Question 4
Step4: Question 5
|
7,535
|
<ASSISTANT_TASK:>
Python Code:
import pyspark.sql.functions as F
import pyspark.sql.types as T
from pyspark.sql import SparkSession
# Initialize PySpark with MongoDB and Elastic support
spark = (
SparkSession.builder.appName("Exploring Data with Reports")
# Load support for MongoDB and Elasticsearch
.config("spark.jars.packages", "org.mongodb.spark:mongo-spark-connector_2.12:3.0.1,org.elasticsearch:elasticsearch-spark-30_2.12:7.14.2")
# Add configuration for MongoDB
.config("spark.mongodb.input.uri", "mongodb://mongo:27017/test.coll")
.config("spark.mongodb.output.uri", "mongodb://mongo:27017/test.coll")
.getOrCreate()
)
sc = spark.sparkContext
sc.setLogLevel("ERROR")
print("\nPySpark initialized...")
# Load the on-time Parquet file
on_time_dataframe = spark.read.parquet('../data/on_time_performance.parquet')
# The first step is easily expressed as SQL: get all unique tail numbers for each airline
on_time_dataframe.createOrReplaceTempView("on_time_performance")
carrier_airplane = spark.sql(
"SELECT DISTINCT Carrier, TailNum FROM on_time_performance"
)
carrier_airplane.limit(5).toPandas()
# How to do things in RDDs - so you know. This shows how to store a sorted list of tail numbers for each carrier, along with a fleet count
tuple_pair = carrier_airplane.rdd.map(
lambda nameTuple: (nameTuple[0], [nameTuple[1]])
)
tuple_pair.first()
# How to do things in RDDs - so you know
reduced_pairs = tuple_pair.reduceByKey(lambda a, b: a + b)
reduced_pairs.first()
# How to do things in RDDs - so you know
final_records = reduced_pairs.map(lambda tuple:
{
'Carrier': tuple[0],
'TailNumbers': sorted(
filter(
lambda x: x is not None and x != '', tuple[1] # empty string tail numbers were
# getting through
)
),
'FleetCount': len(tuple[1])
}
)
final_records.first()
# How to do things in DataFrames - so you can store data to MongoDB
final_records = (
carrier_airplane.groupBy("Carrier").agg(
F.collect_list("TailNum").alias("TailNumbers")
)
.select(
"Carrier",
"TailNumbers",
F.size("TailNumbers").alias("FleetCount")
)
)
final_records.show()
# Determine the total number of airlines in the dataset
total_airlines = final_records.count()
print("Total airlines making up US fleet: {}".format(total_airlines))
# Save chart to MongoDB
(
final_records.write.format("mongo")
.mode("append")
.option("database", "agile_data_science")
.option("collection", "airplanes_per_carrier")
.save()
)
print("Wrote to MongoDB!")
%%bash
mongosh mongo:27017/agile_data_science --quiet --eval 'db.airplanes_per_carrier.count()'
mongosh mongo:27017/agile_data_science --quiet --eval 'db.airplanes_per_carrier.findOne()'
# Get the carriers' airports by origin
carrier_origins = on_time_dataframe.select("Carrier", F.col("Origin").alias("Airport"))
# Get the carriers' airports by destination
carrier_dests = on_time_dataframe.select("Carrier", F.col("Dest").alias("Airport"))
# Combine the origin and destination carrier airports and deduplicate
carrier_airports = carrier_origins.union(carrier_dests).distinct()
airports_per_carrier = carrier_airports.groupBy("Carrier").agg(
F.collect_set("Airport").alias("Airports")
)
airports_per_carrier.toPandas()
# Save chart to MongoDB
(
airports_per_carrier.write.format("mongo")
.mode("append")
.option("database", "agile_data_science")
.option("collection", "airports_per_carrier")
.save()
)
print("Wrote to MongoDB!")
carriers_per_airport = (
carrier_airports.groupBy("Airport")
.agg(
F.collect_set("Carrier").alias("Carriers")
)
)
carriers_per_airport.limit(10).toPandas()
# Save chart to MongoDB
(
carriers_per_airport.write.format("mongo")
.mode("append")
.option("database", "agile_data_science")
.option("collection", "carriers_per_airport")
.save()
)
print("Wrote to MongoDB!")
# Load the on-time Parquet file
on_time_dataframe = spark.read.parquet('../data/on_time_performance.parquet')
# The first step is easily expressed as SQL: get all unique tail numbers
# for each airline
on_time_dataframe.createOrReplaceTempView("on_time_performance")
carrier_codes = spark.sql(
"SELECT DISTINCT Carrier FROM on_time_performance"
)
carrier_codes.show()
carrier_codes = on_time_dataframe.select("Carrier").distinct()
carrier_codes.show()
%%bash
cat ../data/airlines.csv | grep '"DL"\|"NW"\|"AA"'
airlines = (
spark.read.format('com.databricks.spark.csv')
.options(
header='false',
nullValue='\\N'
)
.load('../data/airlines.csv')
)
airlines.limit(5).toPandas()
# Is Delta around?
airlines.filter(airlines._c3 == 'DL').show()
# Drop fields except for C1 as name, C3 as carrier code
airlines.createOrReplaceTempView("airlines")
airlines = spark.sql("SELECT _c1 AS Name, _c3 AS CarrierCode from airlines")
# Check out the OpenFlights airlines record for Delta
airlines.filter(airlines.CarrierCode == 'DL').show()
# Check out the FAA codes record for Delta
carrier_codes.filter(carrier_codes.Carrier == 'DL').show()
# Join our 14 carrier codes to the airlines table to get our set of airlines
our_airlines = carrier_codes.join(
airlines,
carrier_codes.Carrier == airlines.CarrierCode,
how="left"
)
our_airlines = our_airlines.select('Name', 'CarrierCode')
our_airlines.show()
# Up repartitioning
our_airlines.repartition(1).write.mode('overwrite').json("../data/our_airlines.json")
# Down repartitioning
our_airlines.coalesce(1).write.mode('overwrite').json("../data/our_airlines.json")
our_airlines.rdd.getNumPartitions()
%%bash
cat ../data/our_airlines.json/part*.json >> ../data/our_airlines.jsonl
cat ../data/our_airlines.jsonl | sort | uniq > /tmp/our_airlines.jsonl
cp /tmp/our_airlines.jsonl ../data/our_airlines.jsonl
cat ../data/our_airlines.jsonl
import sys, os, re
sys.path.append("../lib")
import utils
import wikipedia
from bs4 import BeautifulSoup
import tldextract
# Load our airlines...
our_airlines = utils.read_json_lines_file('../data/our_airlines.jsonl')
print(our_airlines)
# Build a new list that includes Wikipedia data
with_url = []
for airline in our_airlines[1:]:
# Get the Wikipedia page for the airline name
print(airline["Name"])
if airline["Name"] == "United Airlines":
print("Skipped...")
continue
wikipage = wikipedia.page(airline['Name'])
# Get the summary
summary = wikipage.summary
airline['summary'] = summary
# Get the HTML of the page
page = BeautifulSoup(wikipage.html())
# Task: get the logo from the right 'vcard' column
# 1) Get the vcard table
vcard_table = page.find_all('table', class_='vcard')[0]
# 2) The logo is always the first image inside this table
first_image = vcard_table.find_all('img')[0]
# 3) Set the URL to the image
logo_url = 'http:' + first_image.get('src')
airline['logo_url'] = logo_url
# Task: get the company website
# 1) Find the 'Website' table header
th = page.find_all('th', text='Website')[0]
# 2) Find the parent tr element
tr = th.parent
# 3) Find the a (link) tag within the tr
a = tr.find_all('a')[0]
# 4) Finally, get the href of the a tag
url = a.get('href')
airline['url'] = url
# Get the domain to display with the URL
url_parts = tldextract.extract(url)
airline['domain'] = url_parts.domain + '.' + url_parts.suffix
print(airline)
with_url.append(airline)
utils.write_json_lines_file(with_url, '../data/our_airlines_with_wiki.jsonl')
%%bash
# How many airlines did we cover?
echo `cat ../data/our_airlines_with_wiki.jsonl | wc -l`
echo
# Check out a sample
head -1 ../data/our_airlines_with_wiki.jsonl
%%bash
mongoimport --uri mongodb://mongo:27017 -d agile_data_science -c airlines --file ../data/our_airlines_with_wiki.jsonl
%%bash
mongosh mongodb://mongo:27017/agile_data_science --quiet --eval 'db.airlines.findOne()'
%%bash
# How many airports are in the dataset?
cat ../data/airports.csv | wc -l
import pyspark.sql.functions as F
# Load the flight data
on_time_performance = spark.read.parquet("../data/on_time_performance.parquet")
on_time_performance.createOrReplaceTempView("on_time_performance")
# Get the airport codes based on flight origins
origins = on_time_performance.select(
F.col("Origin").alias("AirportCode")
).distinct()
# Get the airport codes based on flight destinations
dests = on_time_performance.select(
F.col("Dest").alias("AirportCode")
).distinct()
airports = origins.union(dests)
print(f"Total airports: {airports.count():,}")
airports.limit(5).toPandas()
# Load airports.csv and look at it
airports_dat = spark.read.csv("../data/airports.csv")
airports_dat.limit(5).toPandas()
airports_metadata = airports_dat.toDF("Airport ID", "Name", "City", "Country", "IATA", "ICAO", "Latitude", "Longitude", "Altitude", "Timezone", "DST", "TZ_DB", "Type", "Source")
airports_metadata.limit(5).toPandas()
airports_metadata.filter(F.col("IATA") == "ATL").toPandas()
super_airports = airports.join(
airports_metadata,
on=airports.AirportCode == airports_metadata.IATA,
how="left"
)
print(f"Join count: {super_airports.count():,}")
print(f"Join coverage: {(super_airports.count() / airports.count()) * 100:,}%")
super_airports.limit(5).toPandas()
# Save airport metadata to MongoDB
(
super_airports.write.format("mongo")
.mode("overwrite")
.option("database", "agile_data_science")
.option("collection", "airport_metadata")
.save()
)
print("Wrote to MongoDB!")
# Load the FAA N-Number inquiry records
faa_tail_number_inquiry = spark.read.json('../data/faa_tail_number_inquiry.jsonl')
faa_tail_number_inquiry.show()
# Count the records
faa_tail_number_inquiry.count()
# Load our unique tail numbers
unique_tail_numbers = spark.read.json('../data/tail_numbers.jsonl')
unique_tail_numbers.show()
# Join tail numbers to our inquries
tail_num_plus_inquiry = unique_tail_numbers.join(
faa_tail_number_inquiry,
unique_tail_numbers.TailNum == faa_tail_number_inquiry.TailNum,
)
tail_num_plus_inquiry = tail_num_plus_inquiry.drop(unique_tail_numbers.TailNum)
tail_num_plus_inquiry.show()
# Dump extra field and store tail_numbers plus inquiry
tail_num_plus_inquiry.createOrReplaceTempView("tail_num_plus_inquiry")
airplanes = spark.sql("""
SELECT
  TailNum AS TailNum,
  engine_manufacturer AS EngineManufacturer,
  engine_model AS EngineModel,
  manufacturer AS Manufacturer,
  mfr_year AS ManufacturerYear,
  model AS Model,
  owner AS Owner,
  owner_state AS OwnerState,
  serial_number AS SerialNumber
FROM
  tail_num_plus_inquiry
""")
airplanes.repartition(1).write.mode('overwrite').json('../data/airplanes.json')
%%bash
rm ../data/airplanes.jsonl
cat ../data/airplanes.json/part* >> ../data/airplanes.jsonl
head -5 ../data/airplanes.jsonl
airplanes = spark.read.json('../data/airplanes.jsonl')
airplanes.createOrReplaceTempView("airplanes")
manufacturer_counts = spark.sql("""
SELECT
  Manufacturer,
  COUNT(*) AS Total
FROM
  airplanes
GROUP BY
  Manufacturer
ORDER BY
  Total DESC
""")
manufacturer_counts.limit(10).toPandas()
# How many airplanes total?
total_airplanes = spark.sql("""
SELECT
  COUNT(*) AS OverallTotal
FROM airplanes
""")
print("Total airplanes: {:,}".format(total_airplanes.collect()[0].OverallTotal))
mfr_with_totals = manufacturer_counts.crossJoin(total_airplanes)
mfr_with_totals = mfr_with_totals.rdd.map(
lambda x: {
'Manufacturer': x.Manufacturer,
'Total': x.Total,
'Percentage': round(
(
float(x.Total)/float(x.OverallTotal)
) * 100,
2
)
}
)
mfr_with_totals.toDF().show()
relative_manufacturer_counts = spark.sql("""
SELECT
  Manufacturer,
  ROUND(
    100 * (
      COUNT(*)/(SELECT COUNT(*) FROM airplanes)
    ),
    2
  ) AS Percentage,
  COUNT(*) AS Total
FROM
  airplanes
GROUP BY
  Manufacturer
ORDER BY
  Total DESC, Manufacturer
""")
relative_manufacturer_counts.show(20)
%%bash
curl -XPUT 'http://elastic:9200/agile_data_science_airplanes/' \
-H 'Content-Type: application/json' -d '{
"settings" : {
"index" : {
"number_of_shards" : 1,
"number_of_replicas" : 1
}
}
}'
# Load our airplanes
airplanes = spark.read.json("../data/airplanes.json")
airplanes.show(10)
(
airplanes.write.format("org.elasticsearch.spark.sql")
.option("es.nodes", "elastic")
.option("es.port", "9200")
.option("es.nodes.wan.only", "true")
.mode("Overwrite").save("airplanes/_doc")
)
print("Records saved to Elasticsearch!")
%%bash
curl -XGET 'http://localhost:9200/agile_data_science/airplanes/_search?q=*' | jq ''
#
# Now get these things on the web
#
relative_manufacturer_counts_dicts = relative_manufacturer_counts.rdd.map(lambda row: row.asDict())
grouped_manufacturer_counts = relative_manufacturer_counts_dicts.groupBy(lambda x: 1)
# Save to Mongo in the airplanes_per_carrier relation
import pymongo_spark
pymongo_spark.activate()
grouped_manufacturer_counts.saveToMongoDB(
'mongodb://localhost:27017/agile_data_science.airplane_manufacturer_totals'
)
print("Records stored to MongoDB!")
%%bash
mongo agile_data_science --quiet --eval 'db.airplane_manufacturer_totals.findOne()'
%%bash
mongo agile_data_science --quiet --eval 'db.airplane_manufacturer_totals.drop()'
relative_manufacturer_counts = spark.sql("""
SELECT
  Manufacturer,
  COUNT(*) AS Total,
  ROUND(
    100 * (
      COUNT(*)/(SELECT COUNT(*) FROM airplanes)
    ),
    2
  ) AS PercentageTotal
FROM
  airplanes
GROUP BY
  Manufacturer
ORDER BY
  Total DESC, Manufacturer
LIMIT 10
""")
relative_manufacturer_counts.show()
#
# Now get these things on the web
#
relative_manufacturer_counts_dicts = relative_manufacturer_counts.rdd.map(lambda row: row.asDict())
grouped_manufacturer_counts = relative_manufacturer_counts_dicts.groupBy(lambda x: 1)
# Save to Mongo in the airplanes_per_carrier relation
import pymongo_spark
pymongo_spark.activate()
grouped_manufacturer_counts.saveToMongoDB(
'mongodb://localhost:27017/agile_data_science.airplane_manufacturer_totals'
)
print("Records stored to MongoDB!")
airplanes = spark.read.json('../data/airplanes.json')
airplanes.createOrReplaceTempView("airplanes")
manufacturer_variety = spark.sql("""
SELECT
  DISTINCT(Manufacturer) AS Manufacturer
FROM
  airplanes
ORDER BY
  Manufacturer
""")
manufacturer_variety_local = manufacturer_variety.collect()
# We need to print these left justified
for mfr in manufacturer_variety_local:
print(mfr.Manufacturer)
def longest_common_beginning(s1, s2):
    """Detect the longest common beginning string in a pair of strings"""
if s1 == s2:
return s1
min_length = min(len(s1), len(s2))
i = 0
while i < min_length:
if s1[i] == s2[i]:
i += 1
else:
break
return s1[0:i]
def compare_manufacturers(mfrs):
    """Compare two manufacturers, returning a record describing the result"""
mfr1 = mfrs[0]
mfr2 = mfrs[1]
lcb = longest_common_beginning(mfr1, mfr2)
lcb = lcb.strip() # remove extra spaces
len_lcb = len(lcb)
record = {
'mfr1': mfr1,
'mfr2': mfr2,
'lcb': lcb,
'len_lcb': len_lcb,
'eq': mfr1 == mfr2
}
return record
# Pair every unique instance of Manufacturer field with every other for comparison
comparison_pairs = manufacturer_variety.crossJoin(manufacturer_variety)
# Do the comparisons
comparisons = comparison_pairs.rdd.map(compare_manufacturers)
# Matches have > 5 starting chars in common
matches = comparisons.filter(lambda f: f['eq'] == False and f['len_lcb'] > 5)
matches.take(5)
#
# Now we create a mapping of duplicate keys from their raw value to the one we're going to use
#
# 1) Group the matches by the longest common beginning ('lcb')
common_lcbs = matches.groupBy(lambda x: x['lcb'])
# 2) Emit the raw value for each side of the match along with the key, our 'lcb'
mfr1_map = common_lcbs.map(lambda x: [(y['mfr1'], x[0]) for y in x[1]]).flatMap(lambda x: x)
mfr2_map = common_lcbs.map(lambda x: [(y['mfr2'], x[0]) for y in x[1]]).flatMap(lambda x: x)
# 3) Combine the two sides of the comparison's records
map_with_dupes = mfr1_map.union(mfr2_map)
# 4) Remove duplicates
mfr_dedupe_mapping = map_with_dupes.distinct()
# 5) Convert mapping to dataframe to join to airplanes dataframe
mapping_dataframe = mfr_dedupe_mapping.toDF()
# 6) Give the mapping column names
mapping_dataframe.createOrReplaceTempView("mapping_dataframe")
mapping_dataframe = spark.sql(
"SELECT _1 AS Raw, _2 AS NewManufacturer FROM mapping_dataframe"
)
# JOIN our mapping left outer...
airplanes_w_mapping = airplanes.join(
mapping_dataframe,
on=airplanes.Manufacturer == mapping_dataframe.Raw,
how='left_outer'
)
# Now replace Manufacturer with NewManufacturer where needed
airplanes_w_mapping.createOrReplaceTempView("airplanes_w_mapping")
resolved_airplanes = spark.sql("""
SELECT
  TailNum,
  SerialNumber,
  Owner,
  OwnerState,
  IF(NewManufacturer IS NOT null, NewManufacturer, Manufacturer) AS Manufacturer,
  Model,
  ManufacturerYear,
  EngineManufacturer,
  EngineModel
FROM
  airplanes_w_mapping
""")
# Store for later use, in place of airplanes.json
resolved_airplanes.repartition(1).write.mode("overwrite").json("../data/resolved_airplanes.json")
%%bash
cat ../data/resolved_airplanes.json/part* >> ../data/resolved_airplanes.jsonl
head -5 ../data/resolved_airplanes.jsonl
%%bash
mongo agile_data_science --quiet --eval 'db.airplane_manufacturer_totals.drop()'
airplanes = spark.read.json('../data/resolved_airplanes.json')
airplanes.createOrReplaceTempView("airplanes")
#
# Same with sub-queries
#
relative_manufacturer_counts = spark.sql("""
SELECT
  Manufacturer,
  COUNT(*) AS Total,
  ROUND(
    100 * (
      COUNT(*)/(SELECT COUNT(*) FROM airplanes)
    ),
    2
  ) AS PercentageTotal
FROM
  airplanes
GROUP BY
  Manufacturer
ORDER BY
  Total DESC, Manufacturer
LIMIT 10
""")
relative_manufacturer_counts.show(30) # show top 30
#
# Now get these things on the web
#
relative_manufacturer_counts_dict = relative_manufacturer_counts.rdd.map(lambda row: row.asDict())
grouped_manufacturer_counts = relative_manufacturer_counts_dict.groupBy(lambda x: 1)
# Save to Mongo in the airplanes_per_carrier relation
import pymongo_spark
pymongo_spark.activate()
grouped_manufacturer_counts.saveToMongoDB(
'mongodb://localhost:27017/agile_data_science.airplane_manufacturer_totals'
)
print("Data stored to MongoDB!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Extracting Airlines (Entities)
Step2: Compound Records in RDDs
Step3: Compound DataFrames in MongoDB
Step4: Storing to MongoDB
Step5: Verifying Records in MongoDB
Step6: Exercises
Step7: Building an Airline Page in Flask (a minimal controller sketch appears after this list)
Step8: Creating an All Airlines Home Page
Step9: Adding Names to Carrier Codes
Step10: Loading OpenFlights in PySpark
Step11: Lets check if Delta Airlines is in the data, carrier code DL.
Step12: Joining FAA Carrier Codes to OpenFlights Carrier Records
Step13: Verifying Our Result
Step14: Incorporating Wikipedia Content
Step15: Verifying Our Result
Step16: Publishing Enriched Airlines to Mongo
Step17: Verifying Records in Mongo
Step18: Enriched Airlines on the Web
Step20: Investigating Airplanes (Entities)
Step21: As before, we can copy this directory of data into a single file for convenient access outside of Spark—note that this is not a good idea for very large files, but in this case our airplanes data is less than one megabyte
Step23: Boeing Versus Airbus
Step25: Interesting, Boeing planes outnumber Airbus planes by 4 to 1! I had no idea; I thought it was much closer than this. However, what I really want to know is who has what share of the market (without having to compute a ratio in my head). In other words, I’d like to see this data as a percentage.
Step27: This is clearly an out-of-the-way method of calculating percentage totals, but it illustrates how dataflow programming works in more complex examples as well.
Step28: Creating an Airplanes Home Page
Step29: We can verify our documents are there with a quick search
Step30: Now we can add search capability to our /airplanes controller. Recall that we we did this in Chapter 5 for flight search, and that it took several pages of code. This time, we are going to build a reusable component for searching and displaying records in a Flask application.
Step31: Next, check that the data is in Mongo
Step33: The rest is similar to the bar chart from Chapter 5. We add a controller to ch06/web/report_flask.py where we grab the chart from Mongo, and return it as JSON
Step36: Now check out http
Step40: It turns out that we don’t have very much variety at all
Step42: Now we can employ the mapping table we have created. Note that this table could have been prepared manually, given the small number of records, and in that case this is the point at which you would load the mapping table as CSV (and run the next code block)
Step43: Again, for convenience, let’s create a single JSON Lines file and check out the result
Step44: Cleaning out MongoDB
Step46: We need to run the code below, which is just a copy of the original ch06/analyze_airplanes.py with the new path for our resolved airplanes plugged in.
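A minimal sketch of the kind of Flask controller Step7 refers to (the real controller lives in the book's web scripts, which are not part of this cell; the route, database names, and template file are assumptions on my part):
# Hypothetical Flask page backed by the airlines collection stored above
from flask import Flask, render_template
from pymongo import MongoClient
app = Flask(__name__)
client = MongoClient("mongodb://mongo:27017")
@app.route("/airline/<carrier_code>")
def airline_page(carrier_code):
    record = client.agile_data_science.airlines.find_one({"CarrierCode": carrier_code})
    return render_template("airline.html", airline=record)  # airline.html is hypothetical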
|
7,536
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google/starthinker
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
FIELDS = {
'recipe_name':'', # Name of document to deploy to.
}
print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'drive':{
'auth':'user',
'hour':[
],
'copy':{
'source':'https://docs.google.com/spreadsheets/d/1tt597dMsAaxYXaJdifwKYNVzJrIl6E9Pe8GysfVrWOs/',
'destination':{'field':{'name':'recipe_name','prefix':'CM User Editor For ','kind':'string','order':1,'description':'Name of document to deploy to.','default':''}}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set Configuration
Step2: 3. Enter CM360 Campaign Auditor Recipe Parameters
Step3: 4. Execute CM360 Campaign Auditor
|
7,537
|
<ASSISTANT_TASK:>
Python Code:
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import mne
from mne.datasets import sample
from mne.source_space import compute_distance_to_sensors
from mne.source_estimate import SourceEstimate
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'
subjects_dir = data_path / 'subjects'
# Read the forward solutions with surface orientation
fwd = mne.read_forward_solution(fwd_fname)
mne.convert_forward_solution(fwd, surf_ori=True, copy=False)
leadfield = fwd['sol']['data']
print("Leadfield size : %d x %d" % leadfield.shape)
grad_map = mne.sensitivity_map(fwd, ch_type='grad', mode='fixed')
mag_map = mne.sensitivity_map(fwd, ch_type='mag', mode='fixed')
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
picks_meg = mne.pick_types(fwd['info'], meg=True, eeg=False)
picks_eeg = mne.pick_types(fwd['info'], meg=False, eeg=True)
fig, axes = plt.subplots(2, 1, figsize=(10, 8), sharex=True)
fig.suptitle('Lead field matrix (500 dipoles only)', fontsize=14)
for ax, picks, ch_type in zip(axes, [picks_meg, picks_eeg], ['meg', 'eeg']):
im = ax.imshow(leadfield[picks, :500], origin='lower', aspect='auto',
cmap='RdBu_r')
ax.set_title(ch_type.upper())
ax.set_xlabel('sources')
ax.set_ylabel('sensors')
fig.colorbar(im, ax=ax)
fig_2, ax = plt.subplots()
ax.hist([grad_map.data.ravel(), mag_map.data.ravel(), eeg_map.data.ravel()],
bins=20, label=['Gradiometers', 'Magnetometers', 'EEG'],
color=['c', 'b', 'k'])
fig_2.legend()
ax.set(title='Normal orientation sensitivity',
xlabel='sensitivity', ylabel='count')
brain_sens = grad_map.plot(
subjects_dir=subjects_dir, clim=dict(lims=[0, 50, 100]), figure=1)
brain_sens.add_text(0.1, 0.9, 'Gradiometer sensitivity', 'title', font_size=16)
# source space with vertices
src = fwd['src']
# Compute minimum Euclidean distances between vertices and MEG sensors
depths = compute_distance_to_sensors(src=src, info=fwd['info'],
picks=picks_meg).min(axis=1)
maxdep = depths.max() # for scaling
vertices = [src[0]['vertno'], src[1]['vertno']]
depths_map = SourceEstimate(data=depths, vertices=vertices, tmin=0.,
tstep=1.)
brain_dep = depths_map.plot(
subject='sample', subjects_dir=subjects_dir,
clim=dict(kind='value', lims=[0, maxdep / 2., maxdep]), figure=2)
brain_dep.add_text(0.1, 0.9, 'Source depth (m)', 'title', font_size=16)
corr = np.corrcoef(depths, grad_map.data[:, 0])[0, 1]
print('Correlation between source depth and gradiometer sensitivity values: %f.'
% corr)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute sensitivity maps
Step2: Show gain matrix a.k.a. leadfield matrix with sensitivity map
Step3: Compare sensitivity map with distribution of source depths
Step4: Sensitivity is likely to co-vary with the distance between sources and sensors, so we compute the correlation between source depth and gradiometer sensitivity
|
7,538
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import random
import h5py
import time
from keras.models import Sequential
from keras.layers import Dense, Flatten, BatchNormalization, Dropout, Input
from keras.layers.convolutional import Conv1D, MaxPooling1D, AveragePooling1D
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras_contrib.layers import InstanceNormalization
from keras.layers import RepeatVector,Add
from keras.layers import UpSampling2D, Reshape, Activation
from keras.models import Model
import keras.initializers
# activation function used following every layer except for the output layers
activation = 'relu'
# model weight initializer
initializer = 'he_normal'
num_fluxes = 7514
num_labels = 3
# shape of input spectra that is fed into the input layer
input_shape = (None,num_fluxes,1)
# number of filters used in the convolutional layers
num_filters = 8
# length of the filters in the convolutional layers
filter_length = 3
# length of the maxpooling window
pool_length = 4
# number of nodes in each of the hidden fully connected layers
num_hidden = [256,128]
# number of spectra fed into model at once during training
batch_size = 64
# maximum number of interations for model training
max_epochs = 30
# initial learning rate for optimization algorithm
lr = 0.0001
# exponential decay rate for the 1st moment estimates for optimization algorithm
beta_1 = 0.9
# exponential decay rate for the 2nd moment estimates for optimization algorithm
beta_2 = 0.999
# a small constant for numerical stability for optimization algorithm
optimizer_epsilon = 1e-08
early_stopping_min_delta = 0.0001
early_stopping_patience = 4
reduce_lr_factor = 0.5
reduce_lr_epsilon = 0.0009
reduce_lr_patience = 2
reduce_lr_min = 0.00008
loss_function = 'mean_squared_error'
def build_model(input_spec):
# input conv layer with filter length 1, no bias value
x = Conv1D(kernel_initializer=keras.initializers.Constant(0.5),
activation='linear', padding="same", filters=1,
kernel_size=1,use_bias=False)(input_spec)
# instance normalize to bring each spectrum to zero-mean and unit variance
normed_spec = InstanceNormalization()(x)
# upsample the spectra so that they can be easily added to the output of the conv blocks
# this method just repeats the spectra n=num_filters times
normed_spec = Reshape((num_fluxes,1,1))(normed_spec)
repeated_spec = UpSampling2D(size=(1, num_filters))(normed_spec)
# reshape spectra and repeated spectra to proper shape for 1D Conv layers
repeated_spec = Reshape((num_fluxes,num_filters))(repeated_spec)
x = Reshape((num_fluxes,1))(normed_spec)
# Conv block w/ InstanceNorm w/ dropout
x = Conv1D(kernel_initializer=initializer, padding="same", filters=num_filters,
kernel_size=filter_length)(x)
x = Activation('relu')(x)
x = InstanceNormalization()(x)
x = Conv1D(kernel_initializer=initializer, padding="same", filters=num_filters,
kernel_size=filter_length)(x)
x = Activation('relu')(x)
x = InstanceNormalization()(x)
x = Add()([x, repeated_spec])
x = Dropout(0.2)(x)
# Conv block w/ InstanceNorm w/o dropout
x = Conv1D(kernel_initializer=initializer, padding="same", filters=num_filters,
kernel_size=filter_length)(x)
x = Activation('relu')(x)
x = InstanceNormalization()(x)
x = Conv1D(kernel_initializer=initializer, padding="same", filters=num_filters,
kernel_size=filter_length)(x)
x = Activation('relu')(x)
x = InstanceNormalization()(x)
x = Add()([x, repeated_spec])
# Avg pooling w/ dropout (DO NOT APPLY DROPOUT BEFORE POOLING)
x = AveragePooling1D(pool_size=pool_length)(x)
x = Dropout(0.2)(x)
x = Flatten()(x)
# Fully connected blocks w/ BatchNorm
x = Dense(num_hidden[0], kernel_initializer=initializer)(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.3)(x)
x = Dense(num_hidden[1], kernel_initializer=initializer)(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Dropout(0.2)(x)
# output nodes
output_pred = Dense(units=num_labels, activation="linear")(x)
return Model(input_spec,output_pred)
input_spec = Input(shape=(num_fluxes,1,))
model = build_model(input_spec)
optimizer = Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=optimizer_epsilon, decay=0.0)
early_stopping = EarlyStopping(monitor='val_loss', min_delta=early_stopping_min_delta,
patience=early_stopping_patience, verbose=2, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='loss', factor=reduce_lr_factor, epsilon=reduce_lr_epsilon,
patience=reduce_lr_patience, min_lr=reduce_lr_min, mode='min', verbose=2)
model.compile(optimizer=optimizer, loss=loss_function)
model.summary()
# hack to load pre-computed mean and std-dev for faster normalization
mean_and_std = np.load('/data/stars/apogee/dr14/aspcap_labels_mean_and_std.npy')
mean_labels = mean_and_std[0]
std_labels = mean_and_std[1]
num_labels = mean_and_std.shape[1]
def normalize(lb):
return (lb-mean_labels)/std_labels
data_file = '/data/stars/apogee/dr14/starnet_training_data.h5'
with h5py.File(data_file,"r") as F:
spectra = F["spectrum"][:]
labels = np.column_stack((F["TEFF"][:],F["LOGG"][:],F["FE_H"][:]))
# Normalize labels
labels = normalize(labels)
print('Reference set includes '+str(len(spectra))+' individual visit spectra.')
# define the number of wavelength bins (typically 7214)
num_fluxes = spectra.shape[1]
print('Each spectrum contains '+str(num_fluxes)+' wavelength bins')
num_train=int(0.9*len(labels))
# set NaN values to zero
indices_nan = np.where(np.isnan(spectra))
spectra[indices_nan]=0.
# some visit spectra are just zero-vectors... remove these.
spec_std = np.std(spectra,axis=1)
spec_std = spec_std.reshape(spec_std.shape[0],1)
indices = np.where(spec_std!=0.)[0]
spectra = spectra[indices]
labels = labels[indices]
reference_data = np.column_stack((spectra,labels))
np.random.shuffle(reference_data)
train_spectra = reference_data[0:num_train,0:num_fluxes]
# Reshape spectra for convolutional layers
train_spectra = train_spectra.reshape(train_spectra.shape[0], train_spectra.shape[1], 1)
train_labels = reference_data[0:num_train,num_fluxes:]
cv_spectra = reference_data[num_train:,0:num_fluxes]
cv_spectra = cv_spectra.reshape(cv_spectra.shape[0], cv_spectra.shape[1], 1)
cv_labels = reference_data[num_train:,num_fluxes:]
reference_data=[]
spectra=[]
labels=[]
print('Training set includes '+str(len(train_spectra))+' spectra and the cross-validation set includes '+str(len(cv_spectra))+' spectra')
time1 = time.time()
# Train model
model.fit(train_spectra, train_labels, validation_data=(cv_spectra, cv_labels),
epochs=max_epochs, batch_size=batch_size, verbose=2,
callbacks=[reduce_lr,early_stopping])
time2 = time.time()
print("\n" + str(time2-time1) + " seconds for training\n")
# Save model in current directory
model.save('StarNet_DR14.h5')
def build_normalizer_model(input_spec):
# input conv layer with filter length 1 to flatten the shape
x = Conv1D(kernel_initializer=keras.initializers.Constant(0.5), activation='linear', padding="same", filters=1,
kernel_size=1,use_bias=False)(input_spec)
# instance normalize to bring each spectrum to zero-mean and unit variance
normed_spec = InstanceNormalization()(x)
return Model(input_spec,normed_spec)
input_spec = Input(shape=(num_fluxes,1,))
model = build_normalizer_model(input_spec)
model.summary()
normalized_cv = model.predict(cv_spectra)
import matplotlib.pyplot as plt
%matplotlib inline
for i in range(10):
fig, axes = plt.subplots(2,1,figsize=(70, 10))
axes[0].plot(cv_spectra[i,:,0],c='b')
axes[1].plot(normalized_cv[i,:,0],c='r')
axes[1].set_ylim((-4,4))
plt.show()
def build_upsample_model(input_spec):
# input conv layer with filter length 1, no bias value
x = Conv1D(kernel_initializer=keras.initializers.Constant(0.5), activation='linear', padding="same", filters=1,
kernel_size=1,use_bias=False)(input_spec)
# instance normalize to bring each spectrum to zero-mean and unit variance
normed_spec = InstanceNormalization()(x)
# upsample the spectra so that they can be easily added to the output of the conv layers
# this method just repeats the spectra n=num_filters times
normed_spec = Reshape((num_fluxes,1,1))(normed_spec)
repeated_spec = UpSampling2D(size=(1, num_filters))(normed_spec)
repeated_spec = Reshape((num_fluxes,num_filters))(repeated_spec)
return Model(input_spec,repeated_spec)
input_spec = Input(shape=(num_fluxes,1,))
model = build_upsample_model(input_spec)
model.summary()
upsampled_cv = model.predict(cv_spectra[0:100])
# Plot the input spectra, then the normalized upsampled spectra
for i in range(5):
fig, axes = plt.subplots(9,1,figsize=(70, 10))
axes[0].plot(cv_spectra[i,:,0],c='b')
for ii in range(8):
axes[ii+1].plot(upsampled_cv[i,:,ii],c='r')
axes[ii+1].set_ylim((-4,4))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hyper parameters for the model
Step2: Build and compile model
Step3: Load non-normalized spectra
Step4: Spectra Normalization
Step5: Plot the input spectra, then the normalized spectra. I will force the second of the two plots to have the same y-axis range to ensure that the range for our normalized spectra are similar to one another
Step6: We may want to do some pre-processing clipping to the spectra to eliminate the outliers (a sketch follows below)
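A minimal sketch of that clipping step (an assumption about how it could be done, not code from the original notebook): clip the normalized flux values to a fixed range before feeding them to the network.
# Hypothetical pre-processing: clip extreme normalized flux values to [-4, 4]
import numpy as np
clipped_cv = np.clip(normalized_cv, -4.0, 4.0)  # 'normalized_cv' comes from the code above
print(clipped_cv.min(), clipped_cv.max())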
|
7,539
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function # Python 2/3 compatibility
import numpy as np
import pandas as pd
from IPython.display import Image
train_df = pd.read_csv("data/train.tsv", sep="\t")
train_df.sample(10)
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(train_df["review"], train_df["sentiment"], test_size=0.2)
print("Training Data: {}, Validation: {}".format(len(X_train), len(X_valid)))
from sklearn.feature_extraction.text import CountVectorizer
# The API is very similar to model building process.
# Step 1: Instantiate the Vectorizer or more generally called Transformer
vect = CountVectorizer(max_features=5000, binary=True, stop_words="english")
# Fit your Training Data
vect.fit(X_train)
# Transform your training and validation data
X_train_vect = vect.transform(X_train)
X_valid_vect = vect.transform(X_valid)
from sklearn.linear_model import LogisticRegression
model_1 = LogisticRegression()
model_1.fit(X_train_vect, y_train)
# Training Accuracy
print("Training Accuracy: {:.3f}".format(model_1.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(model_1.score(X_valid_vect, y_valid)))
from sklearn.naive_bayes import MultinomialNB
model_2 = MultinomialNB()
model_2.fit(X_train_vect, y_train)
# Training Accuracy
print("Training Accuracy: {:.3f}".format(model_2.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(model_2.score(X_valid_vect, y_valid)))
from sklearn.ensemble import RandomForestClassifier
model_3 = RandomForestClassifier(min_samples_leaf=3, n_estimators=25, n_jobs=-1)
model_3.fit(X_train_vect, y_train)
# Training Accuracy
print("Training Accuracy: {:.3f}".format(model_3.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(model_3.score(X_valid_vect, y_valid)))
from sklearn.ensemble import GradientBoostingClassifier
model_4 = GradientBoostingClassifier(min_samples_leaf=3, n_estimators=25)  # gradient boosting (no n_jobs parameter)
model_4.fit(X_train_vect, y_train)
# Training Accuracy
print("Training Accuracy: {:.3f}".format(model_4.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(model_4.score(X_valid_vect, y_valid)))
from sklearn.neural_network import MLPClassifier
model_5 = MLPClassifier(hidden_layer_sizes=(32,), max_iter=100)
model_5.fit(X_train_vect, y_train)
# Training Accuracy
print("Training Accuracy: {:.3f}".format(model_5.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(model_5.score(X_valid_vect, y_valid)))
## Pass
from sklearn.ensemble import VotingClassifier
classifiers = [("Logistic Regression", model_1),
("Naive Bayes", model_2),
("Random Forest", model_3),
("Gradient Boosted", model_4),
("Neural Nets", model_5)]
classifiers
final_model = VotingClassifier(classifiers, n_jobs=-1)
# Unfortunately, have to run Fit Again on the ensembled model before using it
# Wish there was an option to not have to fit again
final_model.fit(X_train_vect, y_train)
# Drum Rolls - Accuracy on the final Model
print("Training Accuracy: {:.3f}".format(final_model.score(X_train_vect, y_train)))
print("Validation Accuracy: {:.3f}".format(final_model.score(X_valid_vect, y_valid)))
# Read in the Test Dataset
# Note that it's missing the Sentiment Column. That's what we need to Predict
#
test_df = pd.read_csv("data/test.tsv", sep="\t")
test_df.head()
# Vectorize the Review Text
X_test = test_df.review
X_test_vect = vect.transform(X_test)
y_test_pred = final_model.predict(X_test_vect)
df = pd.DataFrame({
"document_id": test_df.document_id,
"sentiment": y_test_pred
})
df.to_csv("data/ensemble_submission1.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Training process
Step3: Vectorize Data (a.k.a. convert text to numbers)
Step4: Model - Logistic Regression
Step5: Model 2 - Naive Bayes
Step6: Model 3 - Random Forest
Step7: Model 4 - Gradient Boosted Trees
Step8: Model 5 - Neural Networks (CPU Only)
Step9: Neural Nets - Textbook Case of Overfitting. Maybe the model is too powerful
Step10: Finding it difficult to pick the Winning Model - Why not Average the Results (a soft-voting sketch follows after this list)
Step11: Let's Update Kaggle Submission
Steps
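A small sketch related to Step10 (my own variation, not in the original notebook): every base model above exposes predict_proba, so the ensemble can also average class probabilities ('soft' voting) instead of counting hard votes.
# Hypothetical soft-voting variant of the ensemble
from sklearn.ensemble import VotingClassifier
soft_model = VotingClassifier(classifiers, voting='soft', n_jobs=-1)
soft_model.fit(X_train_vect, y_train)
print("Validation Accuracy: {:.3f}".format(soft_model.score(X_valid_vect, y_valid)))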
|
7,540
|
<ASSISTANT_TASK:>
Python Code:
sc
# Import the required packages
import time
from numpy import array
# Current directory, or a directory reachable from all of the cluster's workers
DATA_PATH=""
# Download the data files
import urllib.request
f = urllib.request.urlretrieve("https://www.math.univ-toulouse.fr/~besse/Wikistat/data/mnist_train.csv",DATA_PATH+"mnist_train.csv")
f = urllib.request.urlretrieve("https://www.math.univ-toulouse.fr/~besse/Wikistat/data/mnist_test.csv",DATA_PATH+"mnist_test.csv")
# Turn the text file into an RDD of values
## Training data
# Map step: split each line into its fields
trainRDD = sc.textFile(DATA_PATH+"mnist_train.csv").map(lambda l: [float(x) for x in l.split(',')])
# Action
trainRDD.count() # sample size
#test
# Convert the RDD into a DataFrame
from pyspark.sql import Row
from pyspark.ml.linalg import Vectors
# This function converts each line of the RDD into a pyspark.sql Row.
def list_to_Row(l):
    # Create a sparse vector for the features
features = Vectors.sparse(784,dict([(i,v) for i,v in enumerate(l[:-1]) if v!=0]))
row = Row(label = l[-1], features= features)
return row
trainDF = trainRDD.map(list_to_Row).toDF()
# Example row
trainDF.take(1)[0]
## Same thing for the test data
testRDD = sc.textFile(DATA_PATH+'mnist_test.csv').map(lambda l: [float(x) for x in l.split(',')])
testRDD.count() # sample size
testDF = testRDD.map(list_to_Row).toDF()
testDF.take(1)
tauxEch=0.1 # try increasing training-sample sizes
(trainData, DropDatal) = trainRDD.randomSplit([tauxEch, 1-tauxEch])
trainData.count()
### Logistic Regression
from pyspark.ml.classification import LogisticRegression
### Configure the method's parameters
time_start=time.time()
lr = LogisticRegression(maxIter=100, regParam=0.01, fitIntercept=False, tol=0.0001,
family = "multinomial", elasticNetParam=0.0) #0 for L2 penalty, 1 for L1 penalty
### Fit the model
model_lr = lr.fit(trainDF)
time_end=time.time()
time_lrm=(time_end - time_start)
print("LR takes %d s" %(time_lrm)) # (104 s with rate=1)
predictionsRDD = model_lr.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label","prediction").collect()
nb_good_prediction = sum([r[0]==r[1] for r in labelsAndPredictions])
nb_test = testDF.count()
testErr = 1-nb_good_prediction/nb_test
print('Test Error = ' + str(testErr)) # (0.08 with rate=1)
### Decision Tree
from pyspark.ml.classification import DecisionTreeClassifier
### Configure the method's parameters
time_start=time.time()
dt = DecisionTreeClassifier(impurity='gini',maxDepth=5,maxBins=32, minInstancesPerNode=1,
minInfoGain=0.0)
### Fit the model
model_dt = dt.fit(trainDF)
time_end=time.time()
time_dt=(time_end - time_start)
print("DT takes %d s" %(time_dt))
predictionsRDD = model_dt.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label","prediction").collect()
nb_good_prediction = sum([r[0]==r[1] for r in labelsAndPredictions])
nb_test = testDF.count()
testErr = 1-nb_good_prediction/nb_test
print('Test Error = ' + str(testErr))
### Random Forest
from pyspark.ml.classification import RandomForestClassifier
### Configure the method's parameters
time_start=time.time()
rf = RandomForestClassifier(numTrees = 2, impurity='gini', maxDepth=12,
maxBins=32, seed=None)
### Fit the model
model_rf = rf.fit(trainDF)
time_end=time.time()
time_rf=(time_end - time_start)
print("RF takes %d s" %(time_rf))
predictionsRDD = model_rf.transform(testDF)
labelsAndPredictions = predictionsRDD.select("label","prediction").collect()
nb_good_prediction = sum([r[0]==r[1] for r in labelsAndPredictions])
nb_test = testDF.count()
testErr = 1-nb_good_prediction/nb_test
print('Test Error = ' + str(testErr))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data management
Step2: Converting the data to a DataFrame
Step3: Training sub-sample
Step4: Classification method
Step5: Error on the test sample
Step6: LogisticRegressionTrainingSummary provides a summary for a LogisticRegressionModel. Currently, only binary classification is supported. Support for multiclass model summaries will be added in the future. (A usage sketch follows after this list.)
Step7: Error on the test sample
Step8: Random Forest
Step9: Error on the test sample
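A short sketch related to Step6 (an assumption about usage, not part of the original notebook): the training summary can be pulled from the fitted logistic regression model; as the note says, it may not be available for multinomial models on older Spark versions, so the access is guarded.
# Hypothetical: inspect the training summary of the fitted model
try:
    summary = model_lr.summary
    print(summary.objectiveHistory[:5])  # loss value per training iteration
except Exception as e:
    print("Training summary not available:", e)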
|
7,541
|
<ASSISTANT_TASK:>
Python Code:
# Image from https://vene.ro/images/wmd-obama.png
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('wmd-obama.png')
imgplot = plt.imshow(img)
plt.axis('off')
plt.show()
# Initialize logging.
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
sentence_obama = 'Obama speaks to the media in Illinois'
sentence_president = 'The president greets the press in Chicago'
# Import and download stopwords from NLTK.
from nltk.corpus import stopwords
from nltk import download
download('stopwords') # Download stopwords list.
stop_words = stopwords.words('english')
def preprocess(sentence):
return [w for w in sentence.lower().split() if w not in stop_words]
sentence_obama = preprocess(sentence_obama)
sentence_president = preprocess(sentence_president)
import gensim.downloader as api
model = api.load('word2vec-google-news-300')
distance = model.wmdistance(sentence_obama, sentence_president)
print('distance = %.4f' % distance)
sentence_orange = preprocess('Oranges are my favorite fruit')
distance = model.wmdistance(sentence_obama, sentence_orange)
print('distance = %.4f' % distance)
model.init_sims(replace=True) # Normalizes the vectors in the word2vec class.
distance = model.wmdistance(sentence_obama, sentence_president) # Compute WMD as normal.
print('distance: %r' % distance)
distance = model.wmdistance(sentence_obama, sentence_orange)
print('distance = %.4f' % distance)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This method was introduced in the article "From Word Embeddings To Document Distances" (Kusner et al., 2015)
Step2: These sentences have very similar content, and as such the WMD should be low.
Step3: Now, as mentioned earlier, we will be using some downloaded pre-trained
Step4: So let's compute WMD using the wmdistance method.
Step5: Let's try the same thing with two completely unrelated sentences. Notice that the distance is larger.
Step6: Normalizing word2vec vectors
|
7,542
|
<ASSISTANT_TASK:>
Python Code:
for i in locations:
print i
if i not in sch:sch[i]={}
#march 11-24 = 2 weeks
for d in range (11,25):
if d not in sch[i]:
try:
url=airportialinks[i]
full=url+'arrivals/201703'+str(d)
m=requests.get(full).content
sch[i][full]=pd.read_html(m)[0]
#print full
except: pass #print 'no tables',i,d
for i in range(11,25):
testurl=u'https://www.airportia.com/jordan/queen-alia-international-airport/arrivals/201703'+str(i)
print 'nr. of flights on March',i,':',len(sch['AMM'][testurl])
testurl=u'https://www.airportia.com/jordan/queen-alia-international-airport/arrivals/20170318'
k=sch['AMM'][testurl]
k[k['From']=='Frankfurt FRA']
mdf=pd.DataFrame()
for i in sch:
for d in sch[i]:
df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1)
df['To']=i
df['Date']=d
mdf=pd.concat([mdf,df])
mdf['City']=[i[:i.rfind(' ')] for i in mdf['From']]
mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['From']]
k=mdf[mdf['Date']==testurl]
k[k['From']=='Frankfurt FRA']
file("mdf_jo_arrv.json",'w').write(json.dumps(mdf.reset_index().to_json()))
len(mdf)
airlines=set(mdf['Airline'])
cities=set(mdf['City'])
file("cities_jo_arrv.json",'w').write(json.dumps(list(cities)))
file("airlines_jo_arrv.json",'w').write(json.dumps(list(airlines)))
citycoords={}
for i in cities:
if i not in citycoords:
if i==u'Birmingham': z='Birmingham, UK'
elif i==u'Valencia': z='Valencia, Spain'
elif i==u'Naples': z='Naples, Italy'
elif i==u'St. Petersburg': z='St. Petersburg, Russia'
elif i==u'Bristol': z='Bristol, UK'
elif i==u'Beida': z='Bayda, Libya'
else: z=i
citycoords[i]=Geocoder(apik).geocode(z)
print i
citysave={}
for i in citycoords:
citysave[i]={"coords":citycoords[i][0].coordinates,
"country":citycoords[i][0].country}
file("citysave_jo_arrv.json",'w').write(json.dumps(citysave))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: sch checks out with source
Step2: mdf checks out with source
|
7,543
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print("Downloaded " + local)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/first.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dct")
download(
"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dat.gz"
)
import thinkstats2
import thinkplot
import first
live, firsts, others = first.MakeFrames()
first_wgt = firsts.totalwgt_lb
first_wgt_dropna = first_wgt.dropna()
print('Firsts', len(first_wgt), len(first_wgt_dropna))
other_wgt = others.totalwgt_lb
other_wgt_dropna = other_wgt.dropna()
print('Others', len(other_wgt), len(other_wgt_dropna))
first_pmf = thinkstats2.Pmf(first_wgt_dropna, label='first')
other_pmf = thinkstats2.Pmf(other_wgt_dropna, label='other')
width = 0.4 / 16
# plot PMFs of birth weights for first babies and others
thinkplot.PrePlot(2)
thinkplot.Hist(first_pmf, align='right', width=width)
thinkplot.Hist(other_pmf, align='left', width=width)
thinkplot.Config(xlabel='Weight (pounds)', ylabel='PMF')
def PercentileRank(scores, your_score):
count = 0
for score in scores:
if score <= your_score:
count += 1
percentile_rank = 100.0 * count / len(scores)
return percentile_rank
t = [55, 66, 77, 88, 99]
PercentileRank(t, 88)
def Percentile(scores, percentile_rank):
scores.sort()
for score in scores:
if PercentileRank(scores, score) >= percentile_rank:
return score
Percentile(t, 50)
def Percentile2(scores, percentile_rank):
scores.sort()
index = percentile_rank * (len(scores)-1) // 100
return scores[int(index)]
Percentile2(t, 50)
def EvalCdf(sample, x):
count = 0.0
for value in sample:
if value <= x:
count += 1
prob = count / len(sample)
return prob
t = [1, 2, 2, 3, 5]
EvalCdf(t, 0), EvalCdf(t, 1), EvalCdf(t, 2), EvalCdf(t, 3), EvalCdf(t, 4), EvalCdf(t, 5)
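# Hedged check (not part of the original notebook): for any value x, the percentile
# rank is just 100 times the CDF, using the two helper functions defined above.
for x in t:
    assert abs(PercentileRank(t, x) - 100 * EvalCdf(t, x)) < 1e-9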
cdf = thinkstats2.Cdf(live.prglngth, label='prglngth')
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Pregnancy length (weeks)', ylabel='CDF', loc='upper left')
cdf.Prob(41)
cdf.Value(0.5)
first_cdf = thinkstats2.Cdf(firsts.totalwgt_lb, label='first')
other_cdf = thinkstats2.Cdf(others.totalwgt_lb, label='other')
thinkplot.PrePlot(2)
thinkplot.Cdfs([first_cdf, other_cdf])
thinkplot.Config(xlabel='Weight (pounds)', ylabel='CDF')
weights = live.totalwgt_lb
live_cdf = thinkstats2.Cdf(weights, label='live')
median = live_cdf.Percentile(50)
median
iqr = (live_cdf.Percentile(25), live_cdf.Percentile(75))
iqr
live_cdf.PercentileRank(10.2)
sample = np.random.choice(weights, 100, replace=True)
ranks = [live_cdf.PercentileRank(x) for x in sample]
rank_cdf = thinkstats2.Cdf(ranks)
thinkplot.Cdf(rank_cdf)
thinkplot.Config(xlabel='Percentile rank', ylabel='CDF')
resample = live_cdf.Sample(1000)
thinkplot.Cdf(live_cdf)
thinkplot.Cdf(thinkstats2.Cdf(resample, label='resample'))
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='CDF')
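# Hedged illustration (not part of the original notebook) of the idea behind Cdf.Sample:
# draw uniform percentile ranks and map them back through the inverse CDF.
manual_resample = [live_cdf.Value(p) for p in np.random.random(1000)]
thinkplot.Cdf(thinkstats2.Cdf(manual_resample, label='manual resample'))
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='CDF')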
# Solution
first_cdf.PercentileRank(8.5)
# Solution
other_cdf.PercentileRank(8.5)
# Solution
t = np.random.random(1000)
# Solution
pmf = thinkstats2.Pmf(t)
thinkplot.Pmf(pmf, linewidth=0.1)
thinkplot.Config(xlabel='Random variate', ylabel='PMF')
# Solution
cdf = thinkstats2.Cdf(t)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Random variate', ylabel='CDF')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: And compute the distribution of birth weight for first babies and others.
Step3: We can plot the PMFs on the same scale, but it is hard to see if there is a difference.
Step4: PercentileRank computes the fraction of scores less than or equal to your_score.
Step5: If this is the list of scores.
Step6: If you got the 88, your percentile rank is 80.
Step7: Percentile takes a percentile rank and computes the corresponding percentile.
Step8: The median is the 50th percentile, which is 77.
Step9: Here's a more efficient way to compute percentiles.
Step10: Let's hope we get the same answer.
Step11: The Cumulative Distribution Function (CDF) is almost the same as PercentileRank. The only difference is that the result is 0-1 instead of 0-100.
Step12: In this list
Step13: We can evaluate the CDF for various values
Step14: Here's an example using real data, the distribution of pregnancy length for live births.
Step15: Cdf provides Prob, which evaluates the CDF; that is, it computes the fraction of values less than or equal to the given value. For example, 94% of pregnancy lengths are less than or equal to 41.
Step16: Value evaluates the inverse CDF; given a fraction, it computes the corresponding value. For example, the median is the value that corresponds to 0.5.
Step17: In general, CDFs are a good way to visualize distributions. They are not as noisy as PMFs, and if you plot several CDFs on the same axes, any differences between them are apparent.
Step18: In this example, we can see that first babies are slightly, but consistently, lighter than others.
Step19: Again, the median is the 50th percentile.
Step20: The interquartile range is the interval from the 25th to 75th percentile.
Step21: We can use the CDF to look up the percentile rank of a particular value. For example, my second daughter was 10.2 pounds at birth, which is near the 99th percentile.
Step22: If we draw a random sample from the observed weights and map each weight to its percentile rank.
Step23: The resulting list of ranks should be approximately uniform from 0-1.
Step24: That observation is the basis of Cdf.Sample, which generates a random sample from a Cdf. Here's an example.
Step25: This confirms that the random sample has the same distribution as the original data.
Step26: Exercise
|
7,544
|
<ASSISTANT_TASK:>
Python Code:
# useful math functions
from math import pi, cos, acos, sqrt
# importing the QISKit
from qiskit import Aer, IBMQ
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister, execute
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# useful additional packages
from qiskit.wrapper.jupyter import *
from qiskit.backends.ibmq import least_busy
IBMQ.load_accounts()
def ch(qProg, a, b):
"""Controlled-Hadamard gate"""
qProg.h(b)
qProg.sdg(b)
qProg.cx(a, b)
qProg.h(b)
qProg.t(b)
qProg.cx(a, b)
qProg.t(b)
qProg.h(b)
qProg.s(b)
qProg.x(b)
qProg.s(a)
return qProg
def cu3(qProg, theta, phi, lambd, c, t):
"""Controlled-u3 gate"""
qProg.u1((lambd-phi)/2, t)
qProg.cx(c, t)
qProg.u3(-theta/2, 0, -(phi+lambd)/2, t)
qProg.cx(c, t)
qProg.u3(theta/2, phi, 0, t)
return qProg
#CHANGE THIS 7BIT 0-1 STRING TO PERFORM EXPERIMENT ON ENCODING 0000000, ..., 1111111
x1234567 = "0101010"
if len(x1234567) != 7 or not("1" in x1234567 or "0" in x1234567):
raise Exception("x1234567 is a 7-bit 0-1 pattern. Please set it to the correct pattern")
#compute the value of rotation angle theta of (3,1)-QRAC
theta = acos(sqrt(0.5 + sqrt(3.0)/6.0))
#to record the u3 parameters for encoding 000, 010, 100, 110, 001, 011, 101, 111
rotationParams = {"000":(2*theta, pi/4, -pi/4), "010":(2*theta, 3*pi/4, -3*pi/4),
"100":(pi-2*theta, pi/4, -pi/4), "110":(pi-2*theta, 3*pi/4, -3*pi/4),
"001":(2*theta, -pi/4, pi/4), "011":(2*theta, -3*pi/4, 3*pi/4),
"101":(pi-2*theta, -pi/4, pi/4), "111":(pi-2*theta, -3*pi/4, 3*pi/4)}
# Creating registers
# qubits for encoding 7 bits of information with qr[0] kept by the sender
qr = QuantumRegister(3)
# bits for recording the measurement of the qubits qr[1] and qr[2]
cr = ClassicalRegister(2)
encodingName = "Encode"+x1234567
encodingCircuit = QuantumCircuit(qr, cr, name=encodingName)
#Prepare superposition of mixing QRACs of x1...x6 and x7
encodingCircuit.u3(1.187, 0, 0, qr[0])
#Encoding the seventh bit
seventhBit = x1234567[6]
if seventhBit == "1": #copy qr[0] into qr[1] and qr[2]
encodingCircuit.cx(qr[0], qr[1])
encodingCircuit.cx(qr[0], qr[2])
#perform controlled-Hadamard qr[0], qr[1], and toffoli qr[0], qr[1] , qr[2]
encodingCircuit = ch(encodingCircuit, qr[0], qr[1])
encodingCircuit.ccx(qr[0], qr[1], qr[2])
#End of encoding the seventh bit
#encode x1...x6 with two (3,1)-QRACS. To do that, we must flip q[0] so that the controlled encoding is executed
encodingCircuit.x(qr[0])
#Encoding the first 3 bits 000, ..., 111 into the second qubit, i.e., (3,1)-QRAC on the second qubit
firstThreeBits = x1234567[0:3]
#encodingCircuit.cu3(*rotationParams[firstThreeBits], qr[0], qr[1])
encodingCircuit = cu3(encodingCircuit, *rotationParams[firstThreeBits], qr[0], qr[1])
#Encoding the second 3 bits 000, ..., 111 into the third qubit, i.e., (3,1)-QRAC on the third qubit
secondThreeBits = x1234567[3:6]
#encodingCircuit.cu3(*rotationParams[secondTreeBits], qr[0], qr[2])
encodingCircuit = cu3(encodingCircuit, *rotationParams[secondThreeBits], qr[0], qr[2])
#end of encoding
encodingCircuit.barrier()
# dictionary for decoding circuits
decodingCircuits = {}
# Quantum circuits for decoding the 1st to 6th bits
for i, pos in enumerate(["First", "Second", "Third", "Fourth", "Fifth", "Sixth"]):
circuitName = "Decode"+pos
decodingCircuits[circuitName] = QuantumCircuit(qr, cr, name=circuitName)
if i < 3: #measure 1st, 2nd, 3rd bit
if pos == "Second": #if pos == "First" we can directly measure
decodingCircuits[circuitName].h(qr[1])
elif pos == "Third":
decodingCircuits[circuitName].u3(pi/2, -pi/2, pi/2, qr[1])
decodingCircuits[circuitName].measure(qr[1], cr[1])
else: #measure 4th, 5th, 6th bit
if pos == "Fifth": #if pos == "Fourth" we can directly measure
decodingCircuits[circuitName].h(qr[2])
elif pos == "Sixth":
decodingCircuits[circuitName].u3(pi/2, -pi/2, pi/2, qr[2])
decodingCircuits[circuitName].measure(qr[2], cr[1])
#Quantum circuits for decoding the 7th bit
decodingCircuits["DecodeSeventh"] = QuantumCircuit(qr, cr, name="DecodeSeventh")
decodingCircuits["DecodeSeventh"].measure(qr[1], cr[0])
decodingCircuits["DecodeSeventh"].measure(qr[2], cr[1])
#combine encoding and decoding of (7,2)-QRACs to get a list of complete circuits
circuitNames = []
circuits = []
k1 = encodingName
for k2 in decodingCircuits.keys():
circuitNames.append(k1+k2)
circuits.append(encodingCircuit+decodingCircuits[k2])
print("List of circuit names:", circuitNames) #list of circuit names
# for circuit in circuits: #list qasms codes
# print(circuit.qasm())
%%qiskit_job_status
# Use the qasm simulator
backend = Aer.get_backend("qasm_simulator")
# Use the IBM Quantum Experience
# backend = least_busy(IBMQ.backends(simulator=False))
shots = 1000
job = execute(circuits, backend=backend, shots=shots)
results = job.result()
for k in ["DecodeFirst", "DecodeSecond", "DecodeThird", "DecodeFourth", "DecodeFifth", "DecodeSixth"]:
print("Experimental Result of ", encodingName+k)
plot_histogram(results.get_counts(circuits[circuitNames.index(encodingName+k)]))
print("Experimental result of ", encodingName+"DecodeSeventh")
plot_histogram(results.get_counts(circuits[circuitNames.index(encodingName+"DecodeSeventh")]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: We prepare the controlled-Hadamard and controlled-u3 gates that are required in the encoding as below.
Step4: Encoding 7 bits into 2 qubits with $(7,2)$-QRAC
Step5: Below are plots of the experimental results of extracting the first to sixth bit, that results in observing the $i$-th bit with probability at least $0.54$
Step6: The seventh bit is obtained by looking at the content of classical registers. If they are the same, i.e., both are 1 or 0, then we conclude that it is 0, or otherwise 1. For the encoding of 0101010, the seventh bit is 0, so the total probability of observing 00 and 11 must exceed that of observing 01 and 10.
|
7,545
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
# Add your filename and uncomment the following line:
Image(filename='TheoryAndPracticeEx01graph.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graphical excellence and integrity
|
7,546
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from sklearn import datasets, tree
matplotlib.style.use('bmh')
matplotlib.rcParams['figure.figsize']=(10,7)
# windows only hack for graphviz path
import os
for path in os.environ['PATH'].split(os.pathsep):
if path.endswith("Library\\bin"):
os.environ['PATH']+=os.pathsep+os.path.join(path, 'graphviz')
# Iris dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf=tree.DecisionTreeClassifier()
clf.fit(X, y)
iris_feature_names = ["花萼長度", "花萼寬度", "花瓣長度","花瓣寬度"]
iris_target_names = ["山鳶尾", "變色鳶尾", "維吉尼亞鳶尾"]
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=iris_feature_names,
class_names=iris_target_names,
filled=True, rounded=True,
special_characters=True)
import pydot_ng as pydot
from IPython.display import SVG
SVG(pydot.graph_from_dot_data(dot_data).create_svg())
# 只取 X 的兩個 features
X = iris.data[:, [0, 1]]
clf.fit(X, y)
# 邊界
x_min, y_min = X.min(axis=0)-1
x_max, y_max = X.max(axis=0)+1
# 座標點
grid = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
# grid.shape = (2, 200, 200)
# 在座標點 算出 logistic 的預測
Z = clf.predict(grid.reshape(2, -1).T)
Z = Z.reshape(grid.shape[1:])
# 畫出顏色和邊界
plt.pcolormesh(grid[0], grid[1], Z, cmap=plt.cm.rainbow, alpha=0.02)
plt.contour(grid[0], grid[1], Z, colors=['k', 'k', 'k'], linestyles=['-', '-', '-'],
levels=[0, 1, 2])
# 標出 sample 點
plt.scatter(X[:,0], X[:, 1], c=y, cmap=plt.cm.rainbow, zorder=10, s=50);
import gzip
import pickle
with gzip.open('mnist.pkl.gz', 'rb') as f:
train_set, validation_set, test_set = pickle.load(f, encoding='latin1')
train_X, train_y = train_set
test_X, test_y = test_set
#PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=60)
train_X = pca.fit_transform(train_set[0])
test_X = pca.transform(test_set[0])
# use only first 10000 samples
#idx = np.random.choice(np.arange(train_X.shape[0]), 30000, replace=False)
#train_X = train_X[idx]
#train_y = train_y[idx]
clf = tree.DecisionTreeClassifier()
%%timeit -n 1 -r 1
clf.fit(train_X, train_y)
%%timeit -n 1 -r 1
print(np.mean(clf.predict(train_X) == train_y))
%%timeit -n 1 -r 1
print(np.mean(clf.predict(test_X) == test_y))
%%timeit -n 1 -r 1
%run -i q_dtree_halfsize.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 簡易的 決策樹 實驗
Step2: Q
Step3: Q
|
7,547
|
<ASSISTANT_TASK:>
Python Code:
import os
# The Vertex AI Workbench Notebook product has specific requirements
IS_WORKBENCH_NOTEBOOK = os.getenv("DL_ANACONDA_HOME")
IS_USER_MANAGED_WORKBENCH_NOTEBOOK = os.path.exists(
"/opt/deeplearning/metadata/env_version"
)
# Vertex AI Notebook requires dependencies to be installed with '--user'
USER_FLAG = ""
if IS_WORKBENCH_NOTEBOOK:
USER_FLAG = "--user"
! pip3 install --upgrade google-cloud-aiplatform {USER_FLAG} -q
! pip3 install -U grpcio-tools {USER_FLAG} -q
! pip3 install -U h5py {USER_FLAG} -q
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
shell_output = ! gcloud projects list --filter="PROJECT_ID:'{PROJECT_ID}'" --format='value(PROJECT_NUMBER)'
PROJECT_NUMBER = shell_output[0]
print("Project Number:", PROJECT_NUMBER)
REGION = "[your-region]" # @param {type: "string"}
if REGION == "[your-region]":
REGION = "us-central1"
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Vertex AI Workbench, then don't execute this code
IS_COLAB = False
if not os.path.exists("/opt/deeplearning/metadata/env_version") and not os.getenv(
"DL_ANACONDA_HOME"
):
if "google.colab" in sys.modules:
IS_COLAB = True
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
BUCKET_URI = f"gs://{BUCKET_NAME}"
if BUCKET_URI == "" or BUCKET_URI is None or BUCKET_URI == "gs://[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "aip-" + TIMESTAMP
BUCKET_URI = "gs://" + BUCKET_NAME
! gsutil mb -l $REGION $BUCKET_URI
! gsutil ls -al $BUCKET_URI
import google.cloud.aiplatform as aiplatform
import h5py
aiplatform.init(project=PROJECT_ID, location=REGION, staging_bucket=BUCKET_URI)
! gsutil cp gs://cloud-samples-data/vertex-ai/matching_engine/glove-100-angular.hdf5 .
h5 = h5py.File("glove-100-angular.hdf5", "r")
train = h5["train"]
test = h5["test"]
print(train)
with open("glove100.json", "w") as f:
for i in range(len(train)):
f.write('{"id":"' + str(i) + '",')
f.write('"embedding":[' + ",".join(str(x) for x in train[i]) + "]}")
f.write("\n")
EMBEDDINGS_INITIAL_URI = f"{BUCKET_URI}/matching_engine/initial/"
! gsutil cp glove100.json {EMBEDDINGS_INITIAL_URI}
DIMENSIONS = 100
DISPLAY_NAME = "glove_100_1"
tree_ah_index = aiplatform.MatchingEngineIndex.create_tree_ah_index(
display_name=DISPLAY_NAME,
contents_delta_uri=EMBEDDINGS_INITIAL_URI,
dimensions=DIMENSIONS,
approximate_neighbors_count=150,
distance_measure_type="DOT_PRODUCT_DISTANCE",
description="Glove 100 ANN index",
labels={"label_name": "label_value"},
# TreeAH specific parameters
leaf_node_embedding_count=500,
leaf_nodes_to_search_percent=7,
)
INDEX_RESOURCE_NAME = tree_ah_index.resource_name
print(INDEX_RESOURCE_NAME)
with open("glove100_incremental.json", "w") as f:
f.write(
'{"id":"0","embedding":[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]}\n'
)
EMBEDDINGS_UPDATE_URI = f"{BUCKET_URI}/matching-engine/incremental/"
! gsutil cp glove100_incremental.json {EMBEDDINGS_UPDATE_URI}
tree_ah_index = tree_ah_index.update_embeddings(
contents_delta_uri=EMBEDDINGS_UPDATE_URI,
)
INDEX_RESOURCE_NAME = tree_ah_index.resource_name
print(INDEX_RESOURCE_NAME)
# This is for display only; you can name the range anything.
PEERING_RANGE_NAME = "vertex-ai-prediction-peering-range"
NETWORK = "default"
# NOTE: `prefix-length=16` means a CIDR block with mask /16 will be
# reserved for use by Google services, such as Vertex AI.
! gcloud compute addresses create $PEERING_RANGE_NAME \
--global \
--prefix-length=16 \
--description="peering range for Google service" \
--network=$NETWORK \
--purpose=VPC_PEERING
! gcloud services vpc-peerings connect \
--service=servicenetworking.googleapis.com \
--network=$NETWORK \
--ranges=$PEERING_RANGE_NAME \
--project=$PROJECT_ID
! gcloud compute networks peerings list --network $NETWORK
full_network_name = f"projects/{PROJECT_NUMBER}/global/networks/{NETWORK}"
index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
display_name="index_endpoint_for_demo",
description="index endpoint description",
network=full_network_name,
)
INDEX_ENDPOINT_NAME = index_endpoint.resource_name
print(INDEX_ENDPOINT_NAME)
DEPLOYED_INDEX_ID = "tree_ah_glove_deployed_" + TIMESTAMP
MIN_NODES = 1
MAX_NODES = 2
DEPLOY_COMPUTE = "n1-standard-16"
index_endpoint.deploy_index(
display_name="deployed_index_for_demo",
index=tree_ah_index,
deployed_index_id=DEPLOYED_INDEX_ID,
# machine_type=DEPLOY_COMPUTE,
min_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
)
print(index_endpoint.deployed_indexes)
# The number of nearest neighbors to be retrieved from database for each query.
NUM_NEIGHBOURS = 10
# Test query
queries = [
[
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
],
[
-0.99544,
-2.3651,
-0.24332,
-1.0321,
0.42052,
-1.1817,
-0.16451,
-1.683,
0.49673,
-0.27258,
-0.025397,
0.34188,
1.5523,
1.3532,
0.33297,
-0.0056677,
-0.76525,
0.49587,
1.2211,
0.83394,
-0.20031,
-0.59657,
0.38485,
-0.23487,
-1.0725,
0.95856,
0.16161,
-1.2496,
1.6751,
0.73899,
0.051347,
-0.42702,
0.16257,
-0.16772,
0.40146,
0.29837,
0.96204,
-0.36232,
-0.47848,
0.78278,
0.14834,
1.3407,
0.47834,
-0.39083,
-1.037,
-0.24643,
-0.75841,
0.7669,
-0.37363,
0.52741,
0.018563,
-0.51301,
0.97674,
0.55232,
1.1584,
0.73715,
1.3055,
-0.44743,
-0.15961,
0.85006,
-0.34092,
-0.67667,
0.2317,
1.5582,
1.2308,
-0.62213,
-0.032801,
0.1206,
-0.25899,
-0.02756,
-0.52814,
-0.93523,
0.58434,
-0.24799,
0.37692,
0.86527,
0.069626,
1.3096,
0.29975,
-1.3651,
-0.32048,
-0.13741,
0.33329,
-1.9113,
-0.60222,
-0.23921,
0.12664,
-0.47961,
-0.89531,
0.62054,
0.40869,
-0.08503,
0.6413,
-0.84044,
-0.74325,
-0.19426,
0.098722,
0.32648,
-0.67621,
-0.62692,
],
]
matches = index_endpoint.match(
deployed_index_id=DEPLOYED_INDEX_ID, queries=queries, num_neighbors=NUM_NEIGHBOURS
)
for instance in matches:
print("INSTANCE")
for match in instance:
print(match)
brute_force_index = aiplatform.MatchingEngineIndex.create_brute_force_index(
display_name=DISPLAY_NAME,
contents_delta_uri=EMBEDDINGS_INITIAL_URI,
dimensions=DIMENSIONS,
distance_measure_type="DOT_PRODUCT_DISTANCE",
description="Glove 100 index (brute force)",
labels={"label_name": "label_value"},
)
INDEX_BRUTE_FORCE_RESOURCE_NAME = brute_force_index.resource_name
print(INDEX_BRUTE_FORCE_RESOURCE_NAME)
brute_force_index = brute_force_index.update_embeddings(
contents_delta_uri=EMBEDDINGS_UPDATE_URI
)
DEPLOYED_BRUTE_FORCE_INDEX_ID = "glove_brute_force_deployed_" + TIMESTAMP
index_endpoint.deploy_index(
index=brute_force_index, deployed_index_id=DEPLOYED_BRUTE_FORCE_INDEX_ID
)
prod_matches = index_endpoint.match(
deployed_index_id=DEPLOYED_INDEX_ID,
queries=list(test),
num_neighbors=NUM_NEIGHBOURS,
)
exact_matches = index_endpoint.match(
deployed_index_id=DEPLOYED_BRUTE_FORCE_INDEX_ID,
queries=list(test),
num_neighbors=NUM_NEIGHBOURS,
)
# Calculate recall by determining how many neighbors were correctly retrieved as compared to the brute-force option.
correct_neighbors = 0
for tree_ah_neighbors, brute_force_neighbors in zip(prod_matches, exact_matches):
tree_ah_neighbor_ids = [neighbor.id for neighbor in tree_ah_neighbors]
brute_force_neighbor_ids = [neighbor.id for neighbor in brute_force_neighbors]
correct_neighbors += len(
set(tree_ah_neighbor_ids).intersection(brute_force_neighbor_ids)
)
recall = correct_neighbors / (len(test) * NUM_NEIGHBOURS)
print("Recall: {}".format(recall))
# Force undeployment of indexes and delete endpoint
try:
index_endpoint.delete(force=True)
except Exception as e:
print(e)
# Delete indexes
try:
tree_ah_index.delete()
brute_force_index.delete()
except Exception as e:
print(e)
delete_bucket = False
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -rf {BUCKET_URI}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Restart the kernel
Step2: Before you begin
Step3: Get your project number
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex AI SDK for Python
Step12: Download and prepare the prebuilt GloVe embeddings
Step13: Load the embeddings into memory
Step14: Save the train split in JSONL format
Step15: Store the JSONL formatted embeddings in Cloud Storage
Step16: Create Matching Engine Index
Step17: Update the Index
Step18: Update the index
Step19: Setup VPC peering network
Step20: Create the VPC connection
Step21: Check the status of your peering connections.
Step22: Construct the full network name
Step23: Create an IndexEndpoint with VPC Network
Step24: Deploy the Matching Engine Index to the Index Endpoint resource
Step25: Create and execute an online query
Step26: Create brute force index for calibration
Step27: Update the index
Step28: Deploy the brute force index to the IndexEndpoint resource
Step29: Calibration
Step30: Compute Recall
Step31: Cleaning up
|
7,548
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib as mpl
% matplotlib inline
import pandas as pd
import seaborn as sns
from IPython.display import IFrame
import elviz_utils
reduced = pd.read_csv('../results/reduced_data--all_phylogeny_remains.csv')
sample_info = elviz_utils.read_sample_info('../')
sample_info.head()
IFrame('./plot_copies/160330_Order-Burkholderiales_Methylophilales_Methylococcales--Phylum-Bacteroidetes--rep.pdf',
width=800, height=300)
ls "../plots/mixed_phylogeny/"
sample_info[(sample_info['rep'] == 1) &
(sample_info['oxy'] == 'High') &
(sample_info['week'].isin([8, 10]))]
reduced[(reduced['Order']== 'Burkholderiales') &(reduced['ID']=='55_HOW8')]
reduced[(reduced['Order']== 'Burkholderiales') &(reduced['ID']=='79_HOW10')]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: Look into samples that have "too high" of Burkold.
Step3: Link to Elviz Data for 55_HOW8 (High O2 Rep 1 week 8)
Step4: Link to Elviz Data for 79_HOW10 (High O2 Rep 1 week 10)
|
7,549
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np # foundation for Pandas
url = 'http://www.imf.org/external/pubs/ft/weo/2016/02/weodata/WEOOct2016all.xls'
# (1) define the column indices
col_indices = [1, 2, 3, 4, 6] + list(range(9, 46))
# (2) download the dataset
weo = pd.read_csv(url,
sep = '\t',
#index_col='ISO',
usecols=col_indices,
skipfooter=1, engine='python',
na_values=['n/a', '--'],
thousands =',',encoding='windows-1252')
# (3) turn the types of year variables into float
years = [str(year) for year in range(1980, 2017)]
weo[years] = weo[years].astype(float)
print('Variable dtypes:\n', weo.dtypes, sep='')
# create debt and deficits dataframe: two variables and three countries
variables = ['GGXWDG_NGDP', 'GGXCNL_NGDP']
countries = ['ARG', 'DEU', 'GRC']
dd = weo[weo['WEO Subject Code'].isin(variables) & weo['ISO'].isin(countries)]
# change column labels to something more intuitive
dd = dd.rename(columns={'WEO Subject Code': 'Variable',
'Subject Descriptor': 'Description'})
# rename variables (i.e. values of observables)
dd['Variable'] = dd['Variable'].replace(to_replace=['GGXWDG_NGDP', 'GGXCNL_NGDP'], value=['Debt', 'Surplus'])
dd
dd.index
dd.columns
dd['ISO']
dd[['ISO', 'Variable']]
dd[dd['ISO'] == 'ARG']
dd.T
dd.set_index('Country')
# we can do the same thing with a list, which will be meaningful soon...
dd.set_index(['Country'])
ddi = dd.set_index(['Variable', 'Country', 'ISO', 'Description', 'Units'])
ddi
ddi.index
# Chase and Spencer like double quotes
print("The level names are:\n", ddi.index.names, "\n", sep="")
print("The levels (aka level values) are:\n", ddi.index.levels, sep="")
ddi.head(2)
ddi.reset_index()
# or we can reset the index by level
ddi.reset_index(level=1).head(2)
# or by name
ddi.reset_index(level='Units').head(2)
# or do more than one at a time
ddi.reset_index(level=[1, 3]).head(2)
ddi.reset_index(level=[1, 3], drop=True).head(2)
ddt = ddi.T
ddt
ddt.plot()
# indexing by variable
debt = ddt['Debt']
debt
ddt['Debt']['Argentina']
ddt['Debt', 'Argentina']
#ddt['ARG']
fig, ax = plt.subplots()
ddt['Debt'].plot(ax=ax)
ax.legend(['ARG', 'DEU', 'GRE'], loc='best')
#ax.axhline(100, color='k', linestyle='--', alpha=.5)
ddts = ddt.swaplevel(0, 1, axis=1)
ddts
# ddt.xs?
ddt.xs("Argentina", axis=1, level="Country")
ddt.xs("Argentina", axis=1, level="Country")["Debt"]
ddi.stack?
# example from docstring
dic = {'a': [1, 3], 'b': [2, 4]}
s = pd.DataFrame(data=dic, index=['one', 'two'])
print(s)
s.stack()
ddi.index
ddi.unstack() # Units variable has only one value, so this doesn't do much
ddi.unstack(level='ISO')
# drop some of the index levels (think s for small)
dds = ddi.reset_index(level=[1, 3, 4], drop=True)
dds
# give a name to the column labels
dds.columns.name = 'Year'
dds
# convert to long format. Notice printing is different... what `type` is ds?
ds = dds.stack()
ds
# same thing with explicit reference to column name
dds.stack(level='Year').head(8)
# or with level number
dds.stack(level=0).head(8)
# now go long to wide
ds.unstack() # default is lowest value wich is year now
# different level
ds.unstack(level='Variable')
# or two at once
ds.unstack(level=['Variable', 'ISO'])
# stacked dataframe
ds.head(8)
du1 = ds.unstack()
du2 = du1.unstack()
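# Hedged sketch (not part of the original notebook) of one possible answer to the
# plotting exercise: plot Surplus against Year for all three countries using dds.
fig, ax = plt.subplots()
dds.xs('Surplus', level='Variable').T.plot(ax=ax)
ax.set_ylabel('Surplus')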
url = 'http://www2.census.gov/ces/bds/firm/bds_f_sz_release.csv'
raw = pd.read_csv(url)
raw.head()
# Four size categories
sizes = ['a) 1 to 4', 'b) 5 to 9', 'c) 10 to 19', 'd) 20 to 49']
# only defined size categories and only period since 2012
restricted_sample = (raw['year2']>=2012) & raw['fsize'].isin(sizes)
# don't need all variables
var_names = ['year2', 'fsize', 'Firms', 'Emp']
bds = raw[restricted_sample][var_names]
bds
bdsp = bds.pivot(index='fsize', columns='year2', values='Emp')
# divide by a million so bars aren't too long
bdsp = bdsp/10**6
bdsp
# plot 2013 as bar chart
fig, ax = plt.subplots()
bdsp[2013].plot(ax=ax, kind='barh')
ax.set_ylabel('')
ax.set_xlabel('Number of Employees (millions)')
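# Hedged variant (not part of the original notebook): plotting every available year at
# once gives grouped horizontal bars, one group per firm-size class.
fig, ax = plt.subplots()
bdsp.plot(ax=ax, kind='barh')
ax.set_xlabel('Number of Employees (millions)')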
url1 = 'http://www.oecd.org/health/health-systems/'
url2 = 'OECD-Health-Statistics-2017-Frequently-Requested-Data.xls'
docs = pd.read_excel(url1+url2,
skiprows=3,
usecols=[0, 51, 52, 53, 54, 55, 57],
sheetname='Physicians',
na_values=['..'],
skip_footer=21)
# rename country variable
names = list(docs)
docs = docs.rename(columns={names[0]: 'Country'})
# strip footnote numbers from country names
docs['Country'] = docs['Country'].str.rsplit(n=1).str.get(0)
docs = docs.head()
docs
#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id=wants></a>
Step2: Reminders
Step3: Wants
Step4: Comments. The problem here is that the columns include both the numbers (which we want to plot) and some descriptive information (which we don't).
Step5: Exercise. Set Variable as the index.
Step6: Let's take a closer look at the index
Step7: That's a lot to process, so we break it into pieces.
Step8: Knowing the order of the index components and being able to inspect their values and names is fundamental to working with a multi-index.
Step9: Comment. By default, reset_index pushes one or more index levels into columns. If we want to discard that level of the index altogether, we use the parameter drop=True.
Step10: Exercise. For the dataframe ddi do the following in separate code cells
Step11: Comment. We see here that the multi-index for the rows has been turned into a multi-index for the columns. Works the same way.
Step12: Referring to variables with a multi-index
Step13: What's going on? The theme is that we can reference the top level, which in ddi is the Variable. If we try to access a lower level, it bombs.
Step14: Swapping levels
Step15: Exercise. Use the dataframe ddts to plot Debt and Surplus across time for Argentina. Hint
Step16: Exercise. Use a combination of xs and standard slicing with [...] to extract the variable Debt for Greece.
Step17: Single level index
Step18: Multi-index
Step19: Let's get a smaller subset of this data to work with so we can see things a bit more clearly
Step20: Let's remind ourselves what we want. We want to
Step21: Unstacking
Step22: Exercise. Run the code below and explain what each line of code does.
Step23: Exercise (challenging). Take the unstacked dataframe dds. Use some combination of stack, unstack, and plot to plot the variable Surplus against Year for all three countries. Challenging mostly because you need to work out the steps by yourself.
Step24: Pivoting the data
Step25: Comment. Note that all the parameters here are columns. That's not a choice, it's the way the the pivot method is written.
Step26: <a id='review'></a>
Step27: Use this data to
|
7,550
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
def sigmoid(z):
return 1/(1 + np.exp(-z))
assert(sigmoid(0) == 0.5)
assert(sigmoid(10000) == 1.0)
assert(sigmoid(-10000) == 0.0)
import matplotlib.pyplot as plt
plt.plot(np.arange(-10, 10, 0.5), [sigmoid(z) for z in np.arange(-10, 10, 0.5)])
plt.show()
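# Hedged illustration (not part of the original notebook): the cross-entropy cost that
# the classifier below minimises, evaluated for a single training example; it is small
# when the predicted probability agrees with the label and large when it does not.
def logit_cost_single(y, y_hat):
    return -y * np.log(y_hat) - (1 - y) * np.log(1 - y_hat)
print(logit_cost_single(1, 0.9), logit_cost_single(1, 0.1))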
# Sebastian Raschka 2015
# mlxtend Machine Learning Library Extensions
import numpy as np
class LogisticRegression(object):
"""Logistic regression classifier.
Parameters
------------
eta : float
Learning rate (between 0.0 and 1.0)
epochs : int
Passes over the training dataset.
learning : str (default: sgd)
Learning rule, sgd (stochastic gradient descent)
or gd (gradient descent).
lambda_ : float
Regularization parameter for L2 regularization.
No regularization if lambda_=0.0.
Attributes
-----------
w_ : 1d-array
Weights after fitting.
cost_ : list
List of floats with sum of squared error cost (sgd or gd) for every
epoch.
"""
def __init__(self, eta=0.01, epochs=50, lambda_=0.0, learning='sgd'):
self.eta = eta
self.epochs = epochs
self.lambda_ = lambda_
if not learning in ('sgd', 'gd'):
raise ValueError('learning must be sgd or gd')
self.learning = learning
def fit(self, X, y, init_weights=None):
"""Fit training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
init_weights : array-like, shape = [n_features + 1]
Initial weights for the classifier. If None, weights
are initialized to 0.
Returns
-------
self : object
"""
if not len(X.shape) == 2:
raise ValueError('X must be a 2D array. Try X[:,np.newaxis]')
if (np.unique(y) != np.array([0, 1])).all():
raise ValueError('Supports only binary class labels 0 and 1')
if not isinstance(init_weights, np.ndarray):
# Initialize weights to 0
self.w_ = np.zeros(1 + X.shape[1])
else:
self.w_ = init_weights
self.cost_ = []
for i in range(self.epochs):
if self.learning == 'gd':
y_val = self.activation(X)
errors = (y - y_val)
regularize = self.lambda_ * self.w_[1:]
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[1:] += regularize
self.w_[0] += self.eta * errors.sum()
elif self.learning == 'sgd':
cost = 0.0
for xi, yi in zip(X, y):
yi_val = self.activation(xi)
error = (yi - yi_val)
regularize = self.lambda_ * self.w_[1:]
self.w_[1:] += self.eta * xi.dot(error)
self.w_[1:] += regularize
self.w_[0] += self.eta * error
self.cost_.append(self._logit_cost(y, self.activation(X)))
return self
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
class : int
Predicted class label.
"""
# equivalent to np.where(self.activation(X) >= 0.5, 1, 0)
return np.where(self.net_input(X) >= 0.0, 1, 0)
def net_input(self, X):
"""Net input function."""
return X.dot(self.w_[1:]) + self.w_[0]
def activation(self, X):
"""Predict class probabilities for X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
Returns
----------
int
Class probability.
"""
z = self.net_input(X)
return self._sigmoid(z)
def _logit_cost(self, y, y_val):
logit = -y.dot(np.log(y_val)) - ((1 - y).dot(np.log(1 - y_val)))
regularize = (self.lambda_ / 2) * self.w_[1:].dot(self.w_[1:])
return logit + regularize
def _sigmoid(self, z):
return 1.0 / (1.0 + np.exp(-z))
import matplotlib.pyplot as plt
%matplotlib inline
import sys
import pandas as pd
import numpy as np
sys.path = ['/Users/sebastian/github/mlxtend/'] + sys.path
from mlxtend.evaluate import plot_decision_regions
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
# setosa and versicolor
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', 0, 1)
# sepal length and petal length
X = df.iloc[0:100, [0,2]].values
# standardize features
X_std = np.copy(X)
X_std[:,0] = (X[:,0] - X[:,0].mean()) / X[:,0].std()
X_std[:,1] = (X[:,1] - X[:,1].mean()) / X[:,1].std()
lr = LogisticRegression(eta=0.1, epochs=100)
lr.fit(X_std, y)
plot_decision_regions(X_std, y, clf=lr, res=0.02)
plt.plot(range(len(lr.cost_)), lr.cost_)
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.title('Stochastic Gradient Descent')
#plt.text(40, 45, 'eta = 0.01')
plt.show()
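# Hedged sanity check (not part of the original notebook, assumes scikit-learn is
# installed): compare the training accuracy of the hand-rolled classifier with
# scikit-learn's LogisticRegression on the same standardized data.
from sklearn.linear_model import LogisticRegression as SklearnLogisticRegression
sk_lr = SklearnLogisticRegression().fit(X_std, y)
print('custom accuracy :', np.mean(lr.predict(X_std) == y))
print('sklearn accuracy:', np.mean(sk_lr.predict(X_std) == y))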
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step6: Cost Function and Gradient
Step7: Iris example
|
7,551
|
<ASSISTANT_TASK:>
Python Code:
# YOUR ACTION REQUIRED:
# Execute this cell first using <CTRL-ENTER> and then using <SHIFT-ENTER>.
# Note the difference in which cell is selected after execution.
print('Hello world!')
# YOUR ACTION REQUIRED:
# Execute only the first print statement by selecting the first line and pressing
# <CTRL-SHIFT-ENTER>.
print('Only print this line.')
print('Avoid printing this line.')
def xor_str(a, b):
return ''.join([chr(ord(a[i % len(a)]) ^ ord(b[i % len(b)]))
for i in range(max(len(a), len(b)))])
# YOUR ACTION REQUIRED:
# Try to find the correct value for the variable below.
workshop_secret = '(replace me!)'
xor_str(workshop_secret,
'\x03\x00\x02\x10\x00\x1f\x03L\x1b\x18\x00\x06\x07\x06K2\x19)*S;\x17\x08\x1f\x00\x05F\x1e\x00\x14K\x115\x16\x07\x10\x1cR1\x03\x1d\x1cS\x1a\x00\x13J')
# Hint: You might want to checkout the ../solutions directory
# (you should already have opened this directory in a browser tab :-)
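# Hedged aside (not part of the original notebook): xor_str is its own inverse whenever
# the key is no longer than the message, so applying it twice recovers the original text.
# The key below is made up purely for illustration.
example_key = 'key'
assert xor_str(xor_str('hello colab', example_key), example_key) == 'hello colab'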
# We must call this "magic" before importing TensorFlow. We will explain
# further down what "magics" (starting with %) are.
%tensorflow_version 2.x
# Include basic dependencies and display the tensorflow version.
import tensorflow as tf
tf.__version__
# Print the current working directory and list all files in it.
!pwd
!ls
# Especially useful: Installs new packages.
!pip install qrcode
import qrcode
qrcode.make('Colab rocks!')
# YOUR ACTION REQUIRED:
# Set the cursor to after tf.one and press <CTRL-SPACE>.
# On Mac, only <OPTION-ESCAPE> might work.
tf.one
# YOUR ACTION REQUIRED:
# Complete the command to `tf.maximum` and then add the opening bracket "(" to
# see the function documentation.
tf.maximu
tf.maximum?
test_dict = {'key0': 'Tensor', 'key1': 'Flow'}
test_dict?
# Display how long the system has been running.
# Note : this shows "0 users" because no user is logged in via SSH.
!uptime
# Display available and used memory.
!free -h
print("-"*70)
# Display the CPU specification.
!lscpu
print("-"*70)
# Display the GPU specification (if available).
!(nvidia-smi | grep -q "has failed") && echo "No GPU found!" || nvidia-smi
# Display the Matplotlib outputs within a cell's output.
%matplotlib inline
import numpy as np
from matplotlib import pyplot
# Create a randomized scatterplot using matplotlib.
x = np.random.rand(100).astype(np.float32)
noise = np.random.normal(scale=0.3, size=len(x))
y = np.sin(x * 7) + noise
pyplot.scatter(x, y)
# Load an example dataset.
from vega_datasets import data
cars = data.cars()
# Plot the dataset, referencing dataframe column names.
import altair as alt
alt.Chart(cars).mark_point().encode(
x='Horsepower',
y='Miles_per_Gallon',
color='Origin',
tooltip=['Name', 'Origin', 'Horsepower', 'Miles_per_Gallon']
).interactive()
%%sh
echo "This is a shell script!"
# List all running VM processes.
ps -ef
echo "Done"
# Embed custom HTML directly into a cell's output.
%%html
<marquee>HTML rocks</marquee>
n = 1000000
%time list1 = [i for i in range(n)]
print("")
%time list2 = [i for i in range(int(n/2))]
%%time
n = 1000000
list1 = [i for i in range(n)]
list2 = [i for i in range(int(n/2))]
from google.colab import auth
auth.authenticate_user()
!gsutil ls gs://amld-datasets/zoo_img | head
# Note: This cell hangs if you forget to call auth.authenticate_user() above.
tf.io.gfile.glob('gs://amld-datasets/zoo_img/*')[:10]
# YOUR ACTION REQUIRED:
# Explore existing snippets by going to the `Code snippets` section.
# Click on the <> button on the left sidebar to open the snippets.
# Alternatively, you can press `<CTRL><ALT><P>` (or `<COMMAND><OPTION><P>` for
# OS X).
from google.colab import snippets
# snippets.register('https://colab.research.google.com/drive/1OFSjEmqC-UC66xs-LR7-xmgkvxYTrAcN')
from IPython.core.magic import register_line_cell_magic
@register_line_cell_magic
def mymagic(line_content, cell_content=None):
print('line_content="%s" cell_content="%s"' % (line_content, cell_content))
%mymagic Howdy Alice!
%%mymagic simple question
Howdy Alice!
how are you?
#@title Execute me
# Hidden cell content.
print("Double click the cell to see its content.")
# Form example mostly taken from "Adding form fields" Snippet.
#@title Example form
#@markdown Specify some test data and execute this cell.
string_type = 'test_string' #@param {type: "string"}
slider_value = 145 #@param {type: "slider", min: 100, max: 200}
number = 1339 #@param {type: "number"}
date = '2019-01-26' #@param {type: "date"}
pick_me = "a" #@param ['a', 'b', 'c']
#@markdown ---
print("Submitted data:")
print(string_type, slider_value, number, date, pick_me)
# YOUR ACTION REQUIRED:
# Execute this cell, print the variable contents of a, b and exit the debugger.
%pdb on
a = 67069 / 47 - 0x5a
b = a - 0x539
#c = a / b # Will throw an exception.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can also only execute one single statement in a cell.
Step2: What to do if you get stuck
Step3: Importing TensorFlow
Step4: Running shell commands
Step5: Autocompletion and docstrings
Step6: In addition, you can also display docstrings to see the function signature and possible parameters.
Step7: Alternatively, you might also inspect function details with docstrings if available by appending a "?".
Step8: Note
Step9: Runtimes
Step10: As can be seen, the machine has been allocated just very recently for our purposes.
Step11: Plotting
Step12: Altair
Step13: Notebook Magics
Step14: Line magics
Step15: Note
Step16: Data handling
Step17: List a subset of the contained files using the gsutil tool.
Step18: Conveniently, TensorFlow natively supports multiple file systems such as
Step19: Snippets
Step20: We have created some default snippets for this workshop in
Step21: Pro tip
Step22: Forms
Step23: Interactive debugging
|
7,552
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import openmc
uo2 = openmc.Material(1, "uo2")
print(uo2)
mat = openmc.Material()
print(mat)
help(uo2.add_nuclide)
# Add nuclides to uo2
uo2.add_nuclide('U235', 0.03)
uo2.add_nuclide('U238', 0.97)
uo2.add_nuclide('O16', 2.0)
uo2.set_density('g/cm3', 10.0)
zirconium = openmc.Material(2, "zirconium")
zirconium.add_element('Zr', 1.0)
zirconium.set_density('g/cm3', 6.6)
water = openmc.Material(3, "h2o")
water.add_nuclide('H1', 2.0)
water.add_nuclide('O16', 1.0)
water.set_density('g/cm3', 1.0)
water.add_s_alpha_beta('c_H_in_H2O')
mats = openmc.Materials([uo2, zirconium, water])
mats = openmc.Materials()
mats.append(uo2)
mats += [zirconium, water]
isinstance(mats, list)
mats.export_to_xml()
!cat materials.xml
water.remove_nuclide('O16')
water.add_element('O', 1.0)
mats.export_to_xml()
!cat materials.xml
!cat $OPENMC_CROSS_SECTIONS | head -n 10
print(' ...')
!cat $OPENMC_CROSS_SECTIONS | tail -n 10
uo2_three = openmc.Material()
uo2_three.add_element('U', 1.0, enrichment=3.0)
uo2_three.add_element('O', 2.0)
uo2_three.set_density('g/cc', 10.0)
# Create PuO2 material
puo2 = openmc.Material()
puo2.add_nuclide('Pu239', 0.94)
puo2.add_nuclide('Pu240', 0.06)
puo2.add_nuclide('O16', 2.0)
puo2.set_density('g/cm3', 11.5)
# Create the mixture
mox = openmc.Material.mix_materials([uo2, puo2], [0.97, 0.03], 'wo')
sph = openmc.Sphere(r=1.0)
inside_sphere = -sph
outside_sphere = +sph
print((0,0,0) in inside_sphere, (0,0,2) in inside_sphere)
print((0,0,0) in outside_sphere, (0,0,2) in outside_sphere)
z_plane = openmc.ZPlane(z0=0)
northern_hemisphere = -sph & +z_plane
northern_hemisphere.bounding_box
cell = openmc.Cell()
cell.region = northern_hemisphere
# or...
cell = openmc.Cell(region=northern_hemisphere)
cell.fill = water
universe = openmc.Universe()
universe.add_cell(cell)
# this also works
universe = openmc.Universe(cells=[cell])
universe.plot(width=(2.0, 2.0))
universe.plot(width=(2.0, 2.0), basis='xz')
universe.plot(width=(2.0, 2.0), basis='xz',
colors={cell: 'fuchsia'})
fuel_or = openmc.ZCylinder(r=0.39)
clad_ir = openmc.ZCylinder(r=0.40)
clad_or = openmc.ZCylinder(r=0.46)
fuel_region = -fuel_or
gap_region = +fuel_or & -clad_ir
clad_region = +clad_ir & -clad_or
fuel = openmc.Cell(1, 'fuel')
fuel.fill = uo2
fuel.region = fuel_region
gap = openmc.Cell(2, 'air gap')
gap.region = gap_region
clad = openmc.Cell(3, 'clad')
clad.fill = zirconium
clad.region = clad_region
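# Hedged aside (not part of the original notebook): the gap cell above is deliberately
# left void. If a helium fill were wanted instead, it could be defined like any other
# material (the density below is the approximate room-temperature value):
helium = openmc.Material(name='helium')
helium.add_element('He', 1.0)
helium.set_density('g/cm3', 0.0001786)
# gap.fill = helium  # uncomment to use it; it would also need adding to the Materials collection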
pitch = 1.26
left = openmc.XPlane(x0=-pitch/2, boundary_type='reflective')
right = openmc.XPlane(x0=pitch/2, boundary_type='reflective')
bottom = openmc.YPlane(y0=-pitch/2, boundary_type='reflective')
top = openmc.YPlane(y0=pitch/2, boundary_type='reflective')
water_region = +left & -right & +bottom & -top & +clad_or
moderator = openmc.Cell(4, 'moderator')
moderator.fill = water
moderator.region = water_region
box = openmc.rectangular_prism(width=pitch, height=pitch,
boundary_type='reflective')
type(box)
water_region = box & +clad_or
root = openmc.Universe(cells=(fuel, gap, clad, moderator))
geom = openmc.Geometry()
geom.root_universe = root
# or...
geom = openmc.Geometry(root)
geom.export_to_xml()
!cat geometry.xml
point = openmc.stats.Point((0, 0, 0))
src = openmc.Source(space=point)
settings = openmc.Settings()
settings.source = src
settings.batches = 100
settings.inactive = 10
settings.particles = 1000
settings.export_to_xml()
!cat settings.xml
cell_filter = openmc.CellFilter(fuel)
t = openmc.Tally(1)
t.filters = [cell_filter]
t.nuclides = ['U235']
t.scores = ['total', 'fission', 'absorption', '(n,gamma)']
tallies = openmc.Tallies([t])
tallies.export_to_xml()
!cat tallies.xml
openmc.run()
!cat tallies.out
p = openmc.Plot()
p.filename = 'pinplot'
p.width = (pitch, pitch)
p.pixels = (200, 200)
p.color_by = 'material'
p.colors = {uo2: 'yellow', water: 'blue'}
plots = openmc.Plots([p])
plots.export_to_xml()
!cat plots.xml
openmc.plot_geometry()
!convert pinplot.ppm pinplot.png
from IPython.display import Image
Image("pinplot.png")
p.to_ipython_image()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Defining Materials
Step2: On the XML side, you have no choice but to supply an ID. However, in the Python API, if you don't give an ID, one will be automatically generated for you
Step3: We see that an ID of 2 was automatically assigned. Let's now move on to adding nuclides to our uo2 material. The Material object has a method add_nuclide() whose first argument is the name of the nuclide and second argument is the atom or weight fraction.
Step4: We see that by default it assumes we want an atom fraction.
Step5: Now we need to assign a total density to the material. We'll use the set_density for this.
Step6: You may sometimes be given a material specification where all the nuclide densities are in units of atom/b-cm. In this case, you just want the density to be the sum of the constituents. In that case, you can simply run mat.set_density('sum').
Step7: An astute observer might now point out that this water material we just created will only use free-atom cross sections. We need to tell it to use an $S(\alpha,\beta)$ table so that the bound atom cross section is used at thermal energies. To do this, there's an add_s_alpha_beta() method. Note the use of the GND-style name "c_H_in_H2O".
Step8: When we go to run the transport solver in OpenMC, it is going to look for a materials.xml file. Thus far, we have only created objects in memory. To actually create a materials.xml file, we need to instantiate a Materials collection and export it to XML.
Step9: Note that Materials is actually a subclass of Python's built-in list, so we can use methods like append(), insert(), pop(), etc.
Step10: Finally, we can create the XML file with the export_to_xml() method. In a Jupyter notebook, we can run a shell command by putting ! before it, so in this case we are going to display the materials.xml file that we created.
Step11: Element Expansion
Step12: We see that now O16 and O17 were automatically added. O18 is missing because our cross sections file (which is based on ENDF/B-VII.1) doesn't have O18. If OpenMC didn't know about the cross sections file, it would have assumed that all isotopes exist.
Step13: Enrichment
Step14: Mixtures
Step15: The 'wo' argument in the mix_materials() method specifies that the fractions are weight fractions. Materials can also be mixed by atomic and volume fractions with 'ao' and 'vo', respectively. For 'ao' and 'wo' the fractions must sum to one. For 'vo', if fractions do not sum to one, the remaining fraction is set as void.
Step16: Note that by default the sphere is centered at the origin so we didn't have to supply x0, y0, or z0 arguments. Strictly speaking, we could have omitted R as well since it defaults to one. To get the negative or positive half-space, we simply need to apply the - or + unary operators, respectively.
Step17: Now let's see if inside_sphere actually contains points inside the sphere
Step18: Everything works as expected! Now that we understand how to create half-spaces, we can create more complex volumes by combining half-spaces using Boolean operators
Step19: For many regions, OpenMC can automatically determine a bounding box. To get the bounding box, we use the bounding_box property of a region, which returns a tuple of the lower-left and upper-right Cartesian coordinates for the bounding box
Step20: Now that we see how to create volumes, we can use them to create a cell.
Step21: By default, the cell is not filled by any material (void). In order to assign a material, we set the fill property of a Cell.
Step22: Universes and in-line plotting
Step23: The Universe object has a plot method that will display our the universe as current constructed
Step24: By default, the plot will appear in the $x$-$y$ plane. We can change that with the basis argument.
Step25: If we have particular fondness for, say, fuchsia, we can tell the plot() method to make our cell that color.
Step26: Pin cell geometry
Step27: With the surfaces created, we can now take advantage of the built-in operators on surfaces to create regions for the fuel, the gap, and the clad
Step28: Now we can create corresponding cells that assign materials to these regions. As with materials, cells have unique IDs that are assigned either manually or automatically. Note that the gap cell doesn't have any material assigned (it is void by default).
Step29: Finally, we need to handle the coolant outside of our fuel pin. To do this, we create x- and y-planes that bound the geometry.
Step30: The water region is going to be everything outside of the clad outer radius and within the box formed as the intersection of four half-spaces.
Step31: OpenMC also includes a factory function that generates a rectangular prism that could have made our lives easier.
Step32: Pay attention here -- the object that was returned is NOT a surface. It is actually the intersection of four surface half-spaces, just like we created manually before. Thus, we don't need to apply the unary operator (-box). Instead, we can directly combine it with +clad_or.
Step33: The final step is to assign the cells we created to a universe and tell OpenMC that this universe is the "root" universe in our geometry. The Geometry is the final object that is actually exported to XML.
Step34: Starting source and settings
Step35: Now let's create a Settings object and give it the source we created along with specifying how many batches and particles we want to run.
Step36: User-defined tallies
Step37: The what is the total, fission, absorption, and (n,$\gamma$) reaction rates in $^{235}$U. By default, if we only specify what reactions, it will gives us tallies over all nuclides. We can use the nuclides attribute to name specific nuclides we're interested in.
Step38: Similar to the other files, we need to create a Tallies collection and export it to XML.
Step39: Running OpenMC
Step40: Great! OpenMC already told us our k-effective. It also spit out a file called tallies.out that shows our tallies. This is a very basic method to look at tally data; for more sophisticated methods, see other example notebooks.
Step41: Geometry plotting
Step42: With our plot created, we need to add it to a Plots collection which can be exported to XML.
Step43: Now we can run OpenMC in plotting mode by calling the plot_geometry() function. Under the hood this is calling openmc --plot.
Step44: OpenMC writes out a peculiar image with a .ppm extension. If you have ImageMagick installed, this can be converted into a more normal .png file.
Step45: We can use functionality from IPython to display the image inline in our notebook
Step46: That was a little bit cumbersome. Thankfully, OpenMC provides us with a method on the Plot class that does all that "boilerplate" work.
|
7,553
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
wine = pd.read_csv("wine.csv")
baseline = wine.Price.mean()
print (baseline)
baseline = wine.Price.mean()
%matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(wine.AGST, wine.Price)
plt.hlines(baseline, 15, 18, color = 'red', label="Baseline")
plt.legend(loc="upper left")
plt.title("Price vs. Average Growing Season Temp of Bordeaux wine bottles")
plt.xlabel("Temperature [Celsius degrees]")
plt.ylabel("Log of Price")
plt.grid()
plt.show()
import numpy as np
x = np.linspace(15, 18, 10) # 10 evenly-spaced values from 15 to 18
y = 0.5 * x - 1.25 # a simple model
plt.scatter(wine.AGST, wine.Price)
plt.hlines(baseline, 15, 18, color = 'red', label="Baseline")
plt.plot(x,y, label="Simple linear regression")
plt.legend(loc="upper left")
plt.title("Wine price vs. Average Growing Season Temperature")
plt.xlabel("Temperature [Celsius degrees]")
plt.ylabel("Log of Price")
plt.grid()
plt.show()
wine.iloc[0]
price1952 = wine.iloc[0].Price
predictedPrice = 0.5 * 17.1167 - 1.25; print (predictedPrice)
price1952 - predictedPrice
y_bottom = 6
y_top = 9
y_range = y_top - y_bottom
plt.ylim(y_bottom, y_top)
plt.scatter(wine.AGST, wine.Price) # the actual points
plt.plot(x,y, label="Simple linear regression") # the regression line
# now let's plot the residuals
for i in range(0, len(wine)):
# first we need the coordinates of the actual point
x_point = wine.AGST[i]
y_point = wine.Price[i]
    # then we need to say how long the vertical line is
    # (axvline's ymin/ymax are fractions of the axes, so they must be between 0 and 1)
y1 = (y_point - y_bottom) / y_range # scale
y2 = ((0.5 * x_point - 1.25) - y_bottom) / y_range # scale
# now we can plot the vertical RED residual line
plt.axvline(x_point, ymin=y1, ymax=y2, color="red")
plt.title("Residuals of simple model y=0.5x-1.25")
plt.xlabel("Temperature [Celsius degrees]")
plt.ylabel("Log of Price")
plt.grid()
SSE_baseline = np.sum((wine.Price - baseline)**2); SSE_baseline
# Get SSE for our simple linear regression (LR) model
predictedPrices = 0.5 * wine.AGST - 1.25
SSE_simpleLR = np.sum((wine.Price - predictedPrices)**2)
SSE_simpleLR
n = len(wine.Price) # number of examples
MSE_baseline = SSE_baseline / n; MSE_baseline
MSE_simpleLR = SSE_simpleLR / n; MSE_simpleLR
RMSE_baseline = MSE_baseline ** 0.5; RMSE_baseline
RMSE_simpleLR = MSE_simpleLR ** 0.5; RMSE_simpleLR
R2 = 1 - SSE_simpleLR / SSE_baseline; R2
X = wine.AGST # the known variable
Y = wine.Price # the variable to predict
numerator = sum(X*Y) - (1/n)*((sum(X)*sum(Y)))
numerator
denominator = sum(X**2) - (1/n)*((sum(X)*sum(X)))
denominator
slope = numerator / denominator
slope # this is beta1 coefficient
intercept = Y.mean() - slope*(X.mean())
intercept # this is beta0 coefficient
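# Optional cross-check (an illustrative addition, not in the original notebook):
# numpy's polyfit should recover essentially the same coefficients as the
# closed-form solution above
np.polyfit(X, Y, 1)  # returns [slope, intercept]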
y2 = 0.6351 * x - 3.4178 # solution from closed form
plt.scatter(wine.AGST, wine.Price)
plt.hlines(baseline, 15, 18, color = 'red', label="Baseline")
plt.plot(x,y, label="Simple linear regression")
plt.plot(x,y2, label = "Optimal linear regression")
plt.legend(loc="upper left")
plt.title("Wine price vs. Average Temperature")
plt.xlabel("Temperature [Celsius degrees]")
plt.ylabel("Log of Price")
plt.grid()
plt.show()
y2 = 0.6351 * x - 3.4178
plt.plot(x,y2) # regression line
# draw dashed lines
x1_point = 17.0
x2_point = 16.0
y1_point = 0.6351 * x1_point - 3.4178
y2_point = 0.6351 * x2_point - 3.4178
plt.plot((x1_point, x1_point), (y1_point, y2_point), 'g--')
plt.plot((x2_point, x1_point), (y2_point, y2_point), 'g--')
# add notes next to the dashed lines
plt.annotate("Price difference for 1 degree difference", xy=(17.05,7))
plt.annotate("1 degree ", xy=(16.4, 6.6))
plt.title("Interpreting the slope")
plt.xlabel("Temperature [Celsius degrees]")
plt.ylabel("Log of Price")
plt.grid()
plt.show()
-3.4178 + 0.6351 * 18
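# A small helper (an illustrative addition, not part of the original notebook) that
# wraps the fitted coefficients so predictions read more naturally
def predict_price(agst, slope=0.6351, intercept=-3.4178):
    return intercept + slope * agst

predict_price(18)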
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let’s say the average is 7.07
Step2: As you can see, in this example the baseline never predicts the correct value; only a couple of times does it come close and, worst of all, it does not follow the trend at all
Step3: Looks like a better prediction than the baseline!
Step4: The actual price for 1952 bottles was 7.459 and the temperature in 1952 was a bit more than 17 degrees.
Step5: For a temperature of 17.1167, the simple model y=0.5*x-1.25 would predict a price of
Step6: The difference between the actual value (7.4950) and the predicted value (7.3085) is the residual.
Step7: The first data point is then represented by this formula including the residual
Step8: The residuals are the vertical red lines
Step9: Here wine.Price is a vector (specifically, it is a pandas Series, which is based on numpy arrays): a scalar (our baseline) can be subtracted from it and the result squared, producing a new vector, which can then be passed to the numpy function sum() to obtain the sum of its elements.
Step10: The formula looks the same, but in this case the vector wine.AGST is multiplied by a scalar and then shifted by a different scalar to get the predicted prices [10]; that vector is then subtracted from wine.Price and the result squared, with numpy handling the element-wise operations [11]. Very practical!
Step11: which tells us that the average squared error for each data point is around 0.41 for the baseline and a better 0.24 for our LR model.
Step12: which tells us the average error is around 0.64 for the baseline and a better 0.49 for our LR model.
Step13: For a least-squares fit evaluated on its training data, R2 is a number between 0 and 1 (assuming the SST is not zero); for an arbitrary model it can even be negative if the model does worse than the baseline
Step14: Therefore the final linear regression model is
Step15: that gives an SSE of 5.73, an RMSE of 0.479 and an R2 of 0.435
Step16: How can I use the formula to predict a price?
|
7,554
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
mnist_data = input_data.read_data_sets('/tmp/data', one_hot=True)
## Visualize a sample subset of data
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
f,a = plt.subplots(5,10,figsize=(10,5))
for i in range(5):
for j in range(10):
index = i*10 + j  # row-major index so each subplot shows a distinct test image
a[i][j].imshow(np.reshape(mnist_data.test.images[index],(28,28)), cmap='Greys_r')
f.show()
## set learning parameters
learning_rate = 0.5
batch_size = 128
trainig_iters = 2000
dispay_step = 20
# set network parameters
num_weights = 32
num_dims = 784 ## number of input pixels
num_classes = 10
num_layers = 1 ## number of hidden layers
# create placeholders for data
x = tf.placeholder(tf.float32, [None, num_dims])
y_ = tf.placeholder(tf.float32, [None, num_classes])
#### 2-D tensor of floating-point numbers, with a shape [None, 784].
#### --> None means that a dimension can be of any length
#### --> placeholder x stores a batch of data samples
#### --> placeholder y_ is for the true (one-hot encoded) labels
## define weights: initialize using a truncated normal distribution
weights = tf.Variable(tf.truncated_normal([num_dims, num_classes],
mean=0, stddev=1.0/num_dims))
biases = tf.Variable(tf.zeros(shape=[num_classes]))
# --> initial weights are normally distributed, with sigma=(1/n)
## define the model (network)
logits = tf.matmul(x, weights) + biases
y = tf.nn.softmax(logits)
## define the loss-function: Cross-Entropy Loss Function
### One way to define the loss is as follows
### (but it is numerically unstable and should be avoided)
# cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#### --> tf.reduce_sum adds the elements in the dimension specified by reduction_indices
#### --> tf.reduce_mean computes the mean over all the examples in the batch
## Instead, we use tf.nn.softmax_cross_entropy_with_logits, applied to the raw logits
## (passing the already-softmaxed y here would apply softmax twice)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
## Training:
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
## define initialization of variables
init = tf.initialize_all_variables()
## start a Tensorflow session and intitalize variables
sess = tf.Session()
sess.run(init)
losses = []
for i in range(trainig_iters):
batch_xs, batch_ys = mnist_data.train.next_batch(batch_size)
##sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
_, loss_val = sess.run([train_step, cross_entropy],
feed_dict={x: batch_xs, y_: batch_ys})
losses.append(loss_val)
fig = plt.figure(figsize=(10,5))
plt.plot(np.arange(len(losses)), losses)
plt.show()
correct_prediction = tf.equal(tf.argmax(y,dimension=1), tf.argmax(y_,dimension=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist_data.test.images, y_: mnist_data.test.labels}))
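## Illustrative extra check (an addition, not in the original notebook):
## look at the predicted digits for the first few test images
predicted_labels = sess.run(tf.argmax(y, dimension=1),
                            feed_dict={x: mnist_data.test.images[:10]})
print(predicted_labels)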
with sess.as_default():
W = weights.eval()
fig,ax = plt.subplots(2,5,figsize=(20,8))
for i in range(10):
ax[i/5][i%5].imshow(np.reshape(W[:,i], (28,28)), cmap='Greys_r')
fig.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize a sample subset of data
Step2: Side Note
Step3: Tensorflow Session
Step4: Evaluating the model
|
7,555
|
<ASSISTANT_TASK:>
Python Code:
#This notebook also uses the `(some) LaTeX environments for Jupyter`
#https://github.com/ProfFan/latex_envs wich is part of the
#jupyter_contrib_nbextensions package
from myhdl import *
from myhdlpeek import Peeker
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sympy import *
init_printing()
import itertools
#EE drawing tools in python from https://cdelker.bitbucket.io/SchemDraw/
import SchemDraw as schem
import SchemDraw.elements as e
import SchemDraw.logic as l
#https://github.com/jrjohansson/version_information
%load_ext version_information
%version_information myhdl, myhdlpeek, numpy, pandas, matplotlib, sympy, itertools, SchemDraw
#helper functions to read in the .v and .vhd generated files into python
def VerilogTextReader(loc, printresult=True):
with open(f'{loc}.v', 'r') as vText:
VerilogText=vText.read()
if printresult:
print(f'***Verilog module from {loc}.v***\n\n', VerilogText)
return VerilogText
def VHDLTextReader(loc, printresult=True):
with open(f'{loc}.vhd', 'r') as vText:
VerilogText=vText.read()
if printresult:
print(f'***VHDL module from {loc}.vhd***\n\n', VerilogText)
return VerilogText
def ConstraintXDCTextReader(loc, printresult=True):
with open(f'{loc}.xdc', 'r') as xdcText:
ConstraintText=xdcText.read()
if printresult:
print(f'***Constraint file from {loc}.xdc***\n\n', ConstraintText)
return ConstraintText
def TruthTabelGenrator(BoolSymFunc):
Function to generate a truth table from a sympy boolean expression
BoolSymFunc: sympy boolean expression
return TT: a Truth table stored in a pandas dataframe
colsL=sorted([i for i in list(BoolSymFunc.rhs.atoms())], key=lambda x:x.sort_key())
colsR=sorted([i for i in list(BoolSymFunc.lhs.atoms())], key=lambda x:x.sort_key())
bitwidth=len(colsL)
cols=colsL+colsR; cols
TT=pd.DataFrame(columns=cols, index=range(2**bitwidth))
for i in range(2**bitwidth):
inputs=[int(j) for j in list(np.binary_repr(i, bitwidth))]
outputs=BoolSymFunc.rhs.subs({j:v for j, v in zip(colsL, inputs)})
inputs.append(int(bool(outputs)))
TT.iloc[i]=inputs
return TT
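#Quick illustrative check of the helper above (an addition, not part of the original
#notebook): build the truth table of a simple two-input AND expression
Ta, Tb, Tq=symbols('Ta, Tb, Tq')
TruthTabelGenrator(Eq(Tq, Ta & Tb))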
d = schem.Drawing(unit=.5)
d.add(e.VDD, label='High')
d.add(e.LINE, l=d.unit*2, d='down')
d.add(e.DOT_OPEN, botlabel='$1$')
d.draw()
Y=symbols('Y')
YEq=Eq(Y, 1); YEq
#decorator to specify a myHDL module that can be converted
#to myHDL
@block
def WireHigh(Y):
Wire to Constant High
Input:
None
Output:
Y(bool): output
#create signal to High Source
HSource=Signal(bool(1))
#decorator for combinational sensitivity
@always_comb
def logic():
#internal function to specify that Y is set to the High Source
Y.next=HSource
#return all internal functions
return instances()
#create output signal to bind to `WireHigh` Mod
Y=Signal(bool(0))
#Create instatnce of `WireHigh` and bind `Out` Signal to it
DUT=WireHigh(Y)
#Run convert on `DUT` instance of `WireHigh` to create Verilog/VHDL
DUT.convert()
#Read ouput `WireHigh.v` into notebook
VerilogTextReader('WireHigh');
d = schem.Drawing(unit=.5)
d.add(e.VDD, label='High')
d.add(e.LINE, l=d.unit*2, d='down')
d.add(e.DOT, rgtlabel='$1$')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.LED, rgtlabel='LD0', d='down')
#LED to ground
d.add(e.LINE, d='down', l=d.unit*1.5)
d.add(e.GND)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_WireHigh');
d = schem.Drawing(unit=.5)
d.add(e.DOT_OPEN, label='$0$')
d.add(e.LINE, l=d.unit*2, d='down')
d.add(e.GND)
d.draw()
Y=Symbol('Y')
YEq=Eq(Y, 0); YEq
#decorator to specify a myHDL module that can be converted
#to myHDL
@block
def WireLow(Y):
Wire to Constant Low
Input:
None
Output:
Y(bool): output
#create signal to Low Source
LSource=Signal(bool(0))
#decorator for combinational sensitivity
@always_comb
def logic():
#internal function to specify that Y is set to the Low Source
Y.next=LSource
#return all internal functions
return instances()
#create output signal to bind to `WireHigh` Mod
Y=Signal(bool(0))
#Create instatnce of `WireHigh` and bind `Out` Signal to it
DUT=WireLow(Y)
#Run convert on `DUT` instance of `WireHigh` to create Verilog/VHDL
DUT.convert()
#Read ouput `WireHigh.v` into notebook
VerilogTextReader('WireLow');
d = schem.Drawing(unit=.5)
LD0=d.add(e.LED, d='right', label='LD0')
d.add(e.LINE, l=d.unit*2, d='left', xy=LD0.start)
d.add(e.LINE, l=d.unit*2, d='down')
d.add(e.GND)
d.add(e.LINE, l=d.unit*2, d='right', xy=LD0.end)
d.add(e.LINE, l=d.unit*2, d='down')
d.add(e.GND)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_WireLow');
d = schem.Drawing(unit=.5)
d.add(e.DOT_OPEN, l=d.unit*3, lftlabel='$X$')
d.add(l.BUF, label='$Buffer$')
d.add(e.DOT_OPEN, l=d.unit*2, rgtlabel='$Y$')
d.draw()
X, Y=symbols('X, Y')
YEq=Eq(Y, X); YEq
TruthTabelGenrator(YEq)
#convert the Sympy exspresion to a
#numpy exspresion via lambdify
YEqN=lambdify(X, YEq.rhs, dummify=False)
YEqN(np.array([0, 1]))
@block
def BufferGate(X, Y):
Buffer Gate demo module
Input:
X(bool): Input conection to wire between `X` and `Y`
Ouput:
Y(bool): Output connection to wire between 'X' and 'Y'
@always_comb
def logic():
Y.next=X
return instances()
#generate random test values for BufferGate_TB
#stimules input In
TestLen=10
#for testing purposes the random generator needs to be seeded
#to create reproducible values
np.random.seed(12)
XTVs=np.random.randint(0,2, TestLen).astype(int)
XTVs
#clear any previouse stored values in Peeker
Peeker.clear()
#Create the Signals to the BufferGate and
#add them to Peeker for display and anylis
X=Signal(bool(0)); Peeker(X, 'X')
Y=Signal(bool(0)); Peeker(Y, 'Y')
#create instance of `BufferGate` and bind signals
#to it as the "Device Under Testing"
DUT=BufferGate(X, Y)
#Create a Testbench for `BufferGate`
def BufferGate_TB():
myHDL only testbench for module `BufferGate`
#internal function that will apply
#stimules to DUT
@instance
def stimules():
for i in range(TestLen):
#asign next stimules value to X input of DUT
#for every 1 interval delay
X.next=int(XTVs[i])
yield delay(1)
#flag to end the simulation of the DUT after all
#stimules have been run
raise StopSimulation()
#return all internal stimules
return instances()
#bind the DUT, Testbench and Peeker to simulator and run it
sim=Simulation(DUT, BufferGate_TB(), *Peeker.instances()).run()
#View Waveform diagram from Simulation with
#wavefroms in the order of `X` then `Y`
Peeker.to_wavedrom('X', 'Y')
#Capture the Waveform as a Pandas Dataframe for
#detiled anylsis
WireData=Peeker.to_dataframe()
#order the cols in the dataframe `X` then `Y`
WireData=WireData[['X', 'Y']]
WireData
#apply the lambdified sympy expression for a buffer to the X input to
#get a reference to compare the BufferGate module against
WireData['YRef']=WireData['X'].apply(YEqN)
WireData
#Test whether the module's output matches the sympy reference
Test=(WireData['Y']==WireData['YRef']).all()
print(f'Module `BufferGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('BufferGate');
#create BitVector for BufferGate_TBV
XTVs=intbv(int(''.join(XTVs.astype(str)), 2))[TestLen:]
XTVs, bin(XTVs)
#Convertable testbenchs must be treated as a module
@block
def BufferGate_TBV():
myHDL -> Verilog testbench for module `BufferGate`
#create the signals to bind to the DUT
X=Signal(bool(0))
Y=Signal(bool(0))
#print out the waveform of `X` and `Y`
@always_comb
def print_data():
print(X, Y)
#create a register to hold the testdata vector
#Test Signal Bit Vector
XTV=Signal(XTVs)
#create an instance of the `BufferGate` and
#bind X and Y to it
DUT=BufferGate(X, Y)
#create the stimules action
@instance
def stimules():
for i in range(TestLen):
#asign X the next bit from XTV testvector reg
X.next=XTV[i]
#delay one of the smallest time intervals
yield delay(1)
#raise the stop simulation flag
raise StopSimulation()
#return all internals of the Testbench
return instances()
#create instance of the Testbench
TB=BufferGate_TBV()
#run the conversion to Verilog and insure that
#all Signal values are at there default value
TB.convert(hdl="Verilog", initial_values=True)
#read back the testbench into python
VerilogTextReader('BufferGate_TBV');
d=schem.Drawing(unit=.5)
#add switch
SW0=d.add(e.SWITCH_SPDT2, reverse=True, label='SW0')
#connect to buffer
d.add(e.LINE, d='right', xy=SW0.a, l=d.unit*2, label='X')
d.add(e.DOT)
B0=d.add(l.BUF, label='BUF')
#buffer to LED
d.add(e.DOT)
d.add(e.LINE, d='right', l=d.unit*2, label='Y')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.LED, rgtlabel='LD0', d='down')
#LED to ground
d.add(e.LINE, d='down', l=d.unit*1.5)
d.add(e.GND)
#swtich bottom to ground
d.add(e.LINE, d='down', xy=SW0.c, l=d.unit*4)
d.add(e.GND)
#switch top to rail
d.add(e.LINE, d='up', xy=SW0.b, l=d.unit*2.3)
d.add(e.VDD)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_BufferGate');
d = schem.Drawing(unit=.5)
d.add(e.DOT_OPEN, l=d.unit*3, lftlabel='$X$')
d.add(l.NOT, label='$NOT$')
d.add(e.LINE)
d.add(e.DOT_OPEN, rgtlabel='$Y$')
d.draw()
X, Y=symbols('X, Y')
YEq=Eq(Y, ~X); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify(X, YEq.rhs, dummify=False)
YEqN(0), YEqN(1)
@block
def NotGate(X, Y):
NOT gate exsample module
Input:
X(bool): input
Output:
Y(bool): ouput
@always_comb
def logic():
Y.next=not X
return instances()
#generate random test values for NotGate_TB
#stimules input In
TestLen=10
np.random.seed(14)
XTVs=np.random.randint(0,2, TestLen).astype(int)
XTVs
Peeker.clear()
X=Signal(bool(0)); Peeker(X, 'X')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=NotGate(X, Y)
def NotGate_TB():
myHDL only testbench for module `NotGate`
@instance
def stimules():
for i in range(TestLen):
X.next=int(XTVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, NotGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom()
NotData=Peeker.to_dataframe()
NotData
NotData['YRef']=NotData['X'].apply(YEqN).astype(int)
NotData
Test=(NotData['Y']==NotData['YRef']).all()
print(f'Module `NotGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('NotGate');
#create BitVector for NotGate_TBV
XTVs=intbv(int(''.join(XTVs.astype(str)), 2))[TestLen:]
XTVs, bin(XTVs)
@block
def NotGate_TBV():
myHDL -> Verilog testbench for module `NotGate`
X=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X, Y)
#Test Signal Bit Vector
XTV=Signal(XTVs)
DUT=NotGate(X, Y)
@instance
def stimules():
for i in range(TestLen):
X.next=XTV[i]
yield delay(1)
raise StopSimulation()
return instances()
TB=NotGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('NotGate_TBV');
d=schem.Drawing(unit=.5)
#add switch
SW0=d.add(e.SWITCH_SPDT2, reverse=True, label='SW0')
#connect to buffer
d.add(e.LINE, d='right', xy=SW0.a, l=d.unit*2, label='X')
d.add(e.DOT)
N0=d.add(l.NOT, label='NOT')
#buffer to LED
d.add(e.DOT)
d.add(e.LINE, d='right', l=d.unit*2, label='Y')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.LED, rgtlabel='LD0', d='down')
#LED to ground
d.add(e.LINE, d='down', l=d.unit*1.5)
d.add(e.GND)
#swtich bottom to ground
d.add(e.LINE, d='down', xy=SW0.c, l=d.unit*4)
d.add(e.GND)
#switch top to rail
d.add(e.LINE, d='up', xy=SW0.b, l=d.unit*2.3)
d.add(e.VDD)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_NotGate');
d = schem.Drawing(unit=.5)
G = d.add(l.AND2, label='$AND_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
YEq=Eq(Y, X0&X1); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify([X0, X1], YEq.rhs, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def AndGate(X0, X1, Y):
And Gate demo module
Input:
X0(bool): And gate input 0
X1(bool): And gate input 1
Output:
Y(bool): And gate ouput
@always_comb
def logic():
#note here that `and` is used since this
#is a bit wise AND
Y.next=X0 and X1
#if `&` had been used the conversion
#would yield `&` the .all() AND
# when dealing with bus's and behavior mux's this
#distiction must be known
#see:
# https://stackoverflow.com/questions/17327680/what-is-the-difference-between-single-and-double-ampersand-binary-opera
return instances()
#generate systmatic and random test values for AndGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(15)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
#the random generator must have a different seed between each generation
#call in order to produce different values for each call
np.random.seed(16)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=AndGate(X0, X1, Y)
def AndGate_TB():
myHDL only testbench for module `AndGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, AndGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
AndData=Peeker.to_dataframe()
AndData=AndData[['X1', 'X0', 'Y']]
AndData
AndData['YRef']=AndData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
AndData
Test=(AndData['Y']==AndData['YRef']).all()
print(f'Module `AndGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('AndGate');
#create BitVectora for AndGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def AndGate_TBV():
myHDL -> Verilog testbench for module `AndGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=AndGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=AndGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('AndGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.AND2,d='right', label='$AND_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_AndGate');
d = schem.Drawing(unit=.5)
G = d.add(l.AND2, label='$OR_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
YEq=Eq(Y, X0|X1); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify([X0, X1], YEq.rhs, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def OrGate(X0, X1, Y):
Or Gate demo module
Input:
X0(bool): Or gate input 0
X1(bool): Or gate input 1
Output:
Y(bool): Or gate ouput
@always_comb
def logic():
#note here that `or` is used since this
#is a bit wise OR
Y.next=X0 or X1
#if `|` had been used the conversion
#would yield `||` the .all() OR
# when dealing with bus's and behavior mux's this
#distiction must be known
#see:
# https://stackoverflow.com/questions/17327680/what-is-the-difference-between-single-and-double-ampersand-binary-opera
return instances()
#generate systmatic and random test values for OrGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(17)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
np.random.seed(18)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=OrGate(X0, X1, Y)
def OrGate_TB():
myHDL only testbench for module `OrGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, OrGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
OrData=Peeker.to_dataframe()
OrData=OrData[['X1', 'X0', 'Y']]
OrData
OrData['YRef']=OrData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
OrData
Test=(OrData['Y']==OrData['YRef']).all()
print(f'Module `OrGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('OrGate');
#create BitVectora for OrGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def OrGate_TBV():
myHDL -> Verilog testbench for module `OrGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=OrGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=OrGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('OrGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.OR2,d='right', label='$OR_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_OrGate');
d = schem.Drawing(unit=.5)
G = d.add(l.XOR2, label='$XOR_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
YEq=Eq(Y, X0^X1); YEq
TruthTabelGenrator(YEq)
#lambdify must have the mapping manually set from `Xor` to `np.bitwise_xor`
YEqN=lambdify([X0, X1], YEq.rhs, {'Xor':np.bitwise_xor}, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def XorGate(X0, X1, Y):
XOR Gate demo module
Input:
X0(bool): XOR gate input 0
X1(bool): XOR gate input 1
Output:
Y(bool): XOR gate ouput
@always_comb
def logic():
Y.next=X0 ^ X1
return instances()
#generate systmatic and random test values for XorGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(19)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
np.random.seed(20)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=XorGate(X0, X1, Y)
def XorGate_TB():
myHDL only testbench for module `XorGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, XorGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
XorData=Peeker.to_dataframe()
XorData=XorData[['X1', 'X0', 'Y']]
XorData
XorData['YRef']=XorData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
XorData
Test=(XorData['Y']==XorData['YRef']).all()
print(f'Module `XorGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('XorGate');
#create BitVectora for XorGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def XorGate_TBV():
myHDL -> Verilog testbench for module `XorGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=XorGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=XorGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('XorGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.XOR2,d='right', label='$XOR_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_XorGate');
d = schem.Drawing(unit=.5)
G = d.add(l.NAND2, label='$NAND_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
YEq=Eq(Y, Nand(X0,X1)); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify([X0, X1], YEq.rhs, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def NAndGate(X0, X1, Y):
NAND Gate demo module
Input:
X0(bool): NAnd gate input 0
X1(bool): NAnd gate input 1
Output:
Y(bool): NAnd gate ouput
@always_comb
def logic():
#note here that `and` is used since this
#is a bit wise AND
Y.next=not(X0 and X1)
#if `&` had been used the conversion
#would yield `&` the .all() AND
# when dealing with bus's and behavior mux's this
#distiction must be known
#see:
# https://stackoverflow.com/questions/17327680/what-is-the-difference-between-single-and-double-ampersand-binary-opera
return instances()
#generate systmatic and random test values for AndGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(21)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
np.random.seed(23)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=NAndGate(X0, X1, Y)
def NAndGate_TB():
myHDL only testbench for module `NAndGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, NAndGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
NAndData=Peeker.to_dataframe()
NAndData=NAndData[['X1', 'X0', 'Y']]
NAndData
NAndData['YRef']=NAndData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
NAndData
Test=(NAndData['Y']==NAndData['YRef']).all()
print(f'Module `NAndGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('NAndGate');
#create BitVectora for NAndGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def NAndGate_TBV():
myHDL -> Verilog testbench for module `NAndGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=NAndGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=NAndGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('NAndGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.NAND2,d='right', label='$NAND_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_NAndGate');
d = schem.Drawing(unit=.5)
G = d.add(l.NOR2, label='$NOR_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
YEq=Eq(Y, Nor(X0,X1)); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify([X0, X1], YEq.rhs, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def NOrGate(X0, X1, Y):
NOr Gate demo module
Input:
X0(bool): Or gate input 0
X1(bool): Or gate input 1
Output:
Y(bool): Or gate ouput
@always_comb
def logic():
#note here that `or` is used since this
#is a bit wise OR
Y.next=not(X0 or X1)
#if `|` had been used the conversion
#would yield `||` the .all() OR
# when dealing with bus's and behavior mux's this
#distiction must be known
#see:
# https://stackoverflow.com/questions/17327680/what-is-the-difference-between-single-and-double-ampersand-binary-opera
return instances()
#generate systmatic and random test values for NOrGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(24)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
np.random.seed(25)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=NOrGate(X0, X1, Y)
def NOrGate_TB():
myHDL only testbench for module `NOrGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, NOrGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
NOrData=Peeker.to_dataframe()
NOrData=NOrData[['X1', 'X0', 'Y']]
NOrData
NOrData['YRef']=NOrData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
NOrData
Test=(NOrData['Y']==NOrData['YRef']).all()
print(f'Module `NOrGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('NOrGate');
#create BitVectora for NOrGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def NOrGate_TBV():
myHDL -> Verilog testbench for module `NOrGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=NOrGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=NOrGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('NOrGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.NOR2,d='right', label='$NOR_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_NOrGate');
d = schem.Drawing(unit=.5)
G = d.add(l.XNOR2, label='$XNOR_2$')
d.add(e.DOT_OPEN, xy=G.out, rgtlabel='$Y$')
d.add(e.DOT_OPEN, xy=G.in1, lftlabel='$X_1$')
d.add(e.DOT_OPEN, xy=G.in2, lftlabel='$X_0$')
d.draw()
X0, X1, Y=symbols('X_0, X_1, Y')
#expanded expression of XNOR
YEq=Eq(Y, X0&X1|~X0&~X1); YEq
TruthTabelGenrator(YEq)
YEqN=lambdify([X0, X1], YEq.rhs, dummify=False)
SystmaticVals=np.array(list(itertools.product([0,1], repeat=2)))
SystmaticVals, YEqN(SystmaticVals[:, 1], SystmaticVals[:, 0]).astype(int)
@block
def XNOrGate(X0, X1, Y):
XNOR Gate demo module
Input:
X0(bool): XOR gate input 0
X1(bool): XOR gate input 1
Output:
Y(bool): XOR gate ouput
@always_comb
def logic():
Y.next=not(X0 ^ X1)
return instances()
#generate systmatic and random test values for XNOrGate_TB
#stimules inputs X1 and X2
TestLen=10
SystmaticVals=list(itertools.product([0,1], repeat=2))
X0TVs=np.array([i[1] for i in SystmaticVals]).astype(int)
np.random.seed(26)
X0TVs=np.append(X0TVs, np.random.randint(0,2, TestLen)).astype(int)
X1TVs=np.array([i[0] for i in SystmaticVals]).astype(int)
np.random.seed(27)
X1TVs=np.append(X1TVs, np.random.randint(0,2, TestLen)).astype(int)
TestLen=len(X1TVs)
SystmaticVals, X1TVs, X0TVs, TestLen
Peeker.clear()
X0=Signal(bool(0)); Peeker(X0, 'X0')
X1=Signal(bool(0)); Peeker(X1, 'X1')
Y=Signal(bool(0)); Peeker(Y, 'Y')
DUT=XNOrGate(X0, X1, Y)
def XNOrGate_TB():
myHDL only testbench for module `NOrGate`
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TVs[i])
X1.next=int(X1TVs[i])
yield delay(1)
raise StopSimulation()
return instances()
sim=Simulation(DUT, XNOrGate_TB(), *Peeker.instances()).run()
Peeker.to_wavedrom('X1', 'X0', 'Y')
XNOrData=Peeker.to_dataframe()
XNOrData=XNOrData[['X1', 'X0', 'Y']]
XNOrData
XNOrData['YRef']=XNOrData.apply(lambda row:YEqN(row['X0'], row['X1']), axis=1).astype(int)
XNOrData
Test=(XNOrData['Y']==XNOrData['YRef']).all()
print(f'Module `XNOrGate` works as expected: {Test}')
DUT.convert()
VerilogTextReader('XNOrGate');
#create BitVectora for XNOrGate_TBV
X0TVs=intbv(int(''.join(X0TVs.astype(str)), 2))[TestLen:]
X1TVs=intbv(int(''.join(X1TVs.astype(str)), 2))[TestLen:]
X0TVs, bin(X0TVs), X1TVs, bin(X1TVs)
@block
def XNOrGate_TBV():
myHDL -> Verilog testbench for module `NOrGate`
X0=Signal(bool(0))
X1=Signal(bool(0))
Y=Signal(bool(0))
@always_comb
def print_data():
print(X0, X1, Y)
#Test Signal Bit Vectors
X0TV=Signal(X0TVs)
X1TV=Signal(X1TVs)
DUT=XNOrGate(X0, X1, Y)
@instance
def stimules():
for i in range(TestLen):
X0.next=int(X0TV[i])
X1.next=int(X1TV[i])
yield delay(1)
raise StopSimulation()
return instances()
TB=XNOrGate_TBV()
TB.convert(hdl="Verilog", initial_values=True)
VerilogTextReader('XNOrGate_TBV');
d=schem.Drawing(unit=.5)
#add elements
G=d.add(l.XNOR2,d='right', label='$XNOR_2$')
#Gate to led to gnd
d.add(e.LINE, d='right', xy=G.out)
d.add(e.DOT, label='$Y$')
d.add(e.LINE, d='down', l=d.unit*2)
LD0=d.add(e.LED, d='down', label='LD0')
d.add(e.LINE, d='down', l=d.unit*2)
d.add(e.GND)
d.add(e.LINE, d='left', xy=G.in1, l=d.unit)
d.add(e.DOT, label='$X_0$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='up', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*2)
SW0=d.add(e.SWITCH_SPDT, lftlabel='SW0')
d.add(e.GND, xy=SW0.c)
d.add(e.VDD, xy=SW0.b)
d.add(e.LINE, d='left', xy=G.in2, l=d.unit)
d.add(e.DOT, botlabel='$X_1$')
d.add(e.LINE,d='left', l=d.unit)
d.add(e.LINE,d='down', l=d.unit*2)
d.add(e.LINE,d='left', l=d.unit*4)
SW1=d.add(e.SWITCH_SPDT, lftlabel='SW1')
d.add(e.GND, xy=SW1.c)
d.add(e.VDD, xy=SW1.b)
d.draw()
ConstraintXDCTextReader('PYNQ_Z1Constraints_XNOrGate');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: \title{Logic Gate Primitives in myHDL}
Step2: Table of Digital Gate Symbols commonly used
Step3: Sympy Expression
Step5: myHDL Module
Step6: Verilog Conversion
Step7: \begin{figure}
Step8: Board Constraint
Step9: LED 0 (the furthest Right one above the buttons) will light up green
Step10: Sympy Expression
Step12: myHDL Module
Step13: Verilog Conversion
Step14: \begin{figure}
Step15: Board Constraint
Step16: Nothing will happen since the Low Signal on the PYNQ-Z1 is ground so LED 0 will remain off
Step17: Sympy Expression
Step19: myHDL Module
Step21: ## myHDL Testing
Step22: Verilog Conversion
Step24: \begin{figure}
Step25: PYNQ-Z1 Deployment
Step26: Board Constraint File
Step27: Video of Deployment
Step28: Sympy Expression
Step30: myHDL Module
Step32: myHDL Testing
Step33: Verilog Conversion
Step35: \begin{figure}
Step36: PYNQ-Z1 Deployment
Step37: Board Constraint
Step38: Video of Deployment
Step39: Sympy Expression
Step41: myHDL Module
Step43: myHDL Testing
Step44: Verilog Conversion
Step46: \begin{figure}
Step47: PYNQ-Z1 Deployment
Step48: Board Constraint
Step49: Video of Deployment
Step50: Sympy Expression
Step52: myHDL Module
Step54: myHDL Testing
Step55: Verilog Conversion
Step57: \begin{figure}
Step58: PYNQ-Z1 Deployment
Step59: Board Constraint
Step60: Video of Deployment
Step61: Sympy Expression
Step63: myHDL Module
Step65: myHDL Testing
Step66: Verilog Conversion
Step68: \begin{figure}
Step69: PYNQ-Z1 Deployment
Step70: Board Constraint
Step71: Video of Deployment
Step72: Sympy Expression
Step74: myHDL Module
Step76: myHDL Testing
Step77: Verilog Conversion
Step79: myHDL to Verilog Testbench
Step80: \begin{figure}
Step81: Board Constraint
Step82: Video of Deployment
Step83: Sympy Expression
Step85: myHDL Module
Step87: myHDL Testing
Step88: Verilog Conversion
Step90: \begin{figure}
Step91: PYNQ-Z1 Deployment
Step92: Board Constraint
Step93: Video of Deployment
Step94: Sympy Expression
Step96: myHDL Module
Step98: myHDL Testing
Step99: Verilog Conversion
Step101: \begin{figure}
Step102: PYNQ-Z1 Deployment
Step103: Board Constraint
|
7,556
|
<ASSISTANT_TASK:>
Python Code:
print('## Model structure summary\n')
print(model)
params = model.get_params()
n_params = {p.name : p.get_value().size for p in params}
total_params = sum(n_params.values())
print('\n## Number of parameters\n')
print(' ' + '\n '.join(['{0} : {1} ({2:.1f}%)'.format(k, v, 100.*v/total_params)
for k, v in sorted(n_params.items(), key=lambda x: x[0])]))
print('\nTotal : {0}'.format(total_params))
tr = np.array(model.monitor.channels['valid_y_y_1_nll'].time_record) / 3600.
fig = plt.figure(figsize=(12,8))
ax1 = fig.add_subplot(111)
ax1.plot(model.monitor.channels['valid_y_y_1_nll'].val_record)
ax1.plot(model.monitor.channels['train_y_y_1_nll'].val_record)
ax1.set_xlabel('Epochs')
ax1.legend(['Valid', 'Train'])
ax1.set_ylabel('NLL')
ax1.set_ylim(0., 5.)
ax1.grid(True)
ax2 = ax1.twiny()
ax2.set_xticks(np.arange(0,tr.shape[0],20))
ax2.set_xticklabels(['{0:.2f}'.format(t) for t in tr[::20]])
ax2.set_xlabel('Hours')
print("Minimum validation set NLL {0}".format(min(model.monitor.channels['valid_y_y_1_nll'].val_record)))
pv = get_weights_report(model=model)
img = pv.get_img()
img = img.resize((8*img.size[0], 8*img.size[1]))
img_data = io.BytesIO()
img.save(img_data, format='png')
display(Image(data=img_data.getvalue(), format='png'))
plt.plot(model.monitor.channels['learning_rate'].val_record)
h1_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h1_W_kernel_norm_mean'].val_record])
h1_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h1_kernel_norms_mean'].val_record])
plt.plot(h1_W_norms / h1_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h1_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h1_kernel_norms_max'].val_record)
h2_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h2_W_kernel_norm_mean'].val_record])
h2_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h2_kernel_norms_mean'].val_record])
plt.plot(h2_W_norms / h2_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h2_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h2_kernel_norms_max'].val_record)
h3_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h3_W_kernel_norm_mean'].val_record])
h3_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h3_kernel_norms_mean'].val_record])
plt.plot(h3_W_norms / h3_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h3_kernel_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h3_kernel_norms_max'].val_record)
h4_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h4_W_col_norm_mean'].val_record])
h4_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h4_col_norms_mean'].val_record])
plt.plot(h4_W_norms / h4_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h4_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h4_col_norms_max'].val_record)
h5_W_up_norms = np.array([float(v) for v in model.monitor.channels['mean_update_h5_W_col_norm_mean'].val_record])
h5_W_norms = np.array([float(v) for v in model.monitor.channels['valid_h5_col_norms_mean'].val_record])
plt.plot(h5_W_norms / h5_W_up_norms)
plt.show()
plt.plot(model.monitor.channels['valid_h5_col_norms_mean'].val_record)
plt.plot(model.monitor.channels['valid_h5_col_norms_max'].val_record)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train and valid set NLL trace
Step2: Visualising first layer weights
Step3: Learning rate
Step4: Update norm monitoring
|
7,557
|
<ASSISTANT_TASK:>
Python Code:
1000*(1/10**1.5)
bins = np.arange(1,max(date)+1,7)
H = pl.histogram(date,bins = bins)
x = H[1][:-1]
y = H[0]
c = (y>0)*(x > 30.0)
lx = np.log10(x[c] - min(x[c])+1)
ly = np.log10(y[c])
B = binning(lx,ly,20)
c = (B[0] >= -1)*(B[0] < 3.0)*(B[1] > 0.1)
fit = S.linregress(B[0][c],B[1][c])
print fit
pl.plot(lx,ly,'b.')
pl.plot(B[0],B[1],'ro')
pl.plot(B[0][c],B[0][c]*fit[0]+fit[1],'k-')
pl.xlabel("Time [days]")
pl.ylabel("Count new Vulnerabilities")
arrivals = []
uuser = []
for u in np.unique(user):
index = np.argwhere(user==u)[0][0]
arrivals = np.append(arrivals,index)
uuser = np.append(uuser,u)
#print u,date[arrivals[-1]]
o = np.argsort(arrivals)
uuser = uuser[o]
arrivals = arrivals[o]
binsize = 7
bins = np.arange(1,max(date)+1,binsize)
H = pl.histogram(date[list(arrivals)],bins = bins)
x = H[1][:-1]
y = H[0]
c = (y>0)*(x > 0)
lx = np.log10(x[c] - min(x[c])+1)
ly = np.log10(y[c])
B = binning(lx,ly,20)
c = (B[0] >= -1)*(B[0] < 3.0)#*(B[1] > .4)
fit = S.linregress(B[0][c],B[1][c])
print fit
pl.plot(lx,ly,'b.')
pl.plot(B[0],B[1],'ro')
pl.plot(B[0][c],B[0][c]*fit[0]+fit[1],'k-')
pl.xlabel("log10(Time [days])")
pl.ylabel("log10(Count new researchers)")
x = H[1][:-1]
y = H[0]
c = y > 0
x = x[c]
y = y[c]
ly = np.log10(y)
Hr = pl.histogram(date,weights=reward,bins=bins)
xr = Hr[1][:-1]
yr = Hr[0]
c = yr > 0
xr = xr[c]
yr = yr[c]
lyr = np.log10(yr)
Hra = pl.histogram(date[list(arrivals)],weights=reward[list(arrivals)],bins=bins)
xra = Hra[1][:-1]
yra = Hra[0]
c = yra > 0
xra = xra[c]
yra = yra[c]
lyra = np.log10(yra)
print len(lyr),len(lyra)
pl.bar(x,ly,binsize,color="blue",alpha=0.3)
pl.bar(xr,lyr,binsize,color = "red",alpha=0.3)
#pl.bar(xra,lyra,binsize,color = "purple",alpha=0.3)
pl.xlabel("Time")
pl.ylabel("log10(values)")
h = H[0] + np.random.rand(len(H[0])) - 0.5
hr = Hr[0] + np.random.rand(len(H[0])) - 0.5
print S.spearmanr(h,hr),len(h),len(hr)
L,rho = crossLagCorr(hr[:],h[:],lagspan=15)
pl.plot(np.array(L)*binsize,rho,'o-')
pl.ylim(ymax=1)
pl.ylabel("cross-correlation hr -> h")
pl.xlabel("Time [days]")
test = []
for u in uuser:
index = np.argwhere(user==u)
df = np.diff(date[index].flatten())
test = np.append(test,df)
test[test < 1] = 1.
x,y = rankorder(test)
pl.loglog(x,y,'o')
print np.median(test),np.mean(test),np.std(test),np.percentile(test,90)
pl.xlabel("delta t between two awards by one user")
pl.ylabel("rank ordering")
x = []
y = []
X = []
Y = []
for u in uuser:
index = np.argwhere(user==u)
x = np.append(x,len(reward[index].flatten()))
y = np.append(y,np.sum(reward[index].flatten()))
#print len(reward[index].flatten())
Y = np.append(Y,reward[index].flatten())
X = np.append(X,np.arange(1,len(reward[index].flatten())+1))
print len(X),len(Y)
pl.figure(1,(8,8))
c = (X > 0)*(Y > 0)
lX = np.log10(X[c])
lY = np.log10(Y[c])
B = binning(X[c],lY[c],30)
print S.linregress(X[c],lY[c])
fit = S.linregress(B[0],B[1])
print fit
pl.plot(X[c],lY[c],'.')
pl.plot(B[0],B[1],'ro')
pl.plot(B[0],B[0]*fit[0]+fit[1],'k-')
pl.xlabel("rank submission with award")
pl.ylabel("log10(bounty)")
pl.figure(2,(8,8))
c = (x > 0)*(y > 0)
lx = np.log10(x[c])
ly = np.log10(y[c])
B = binning(lx,ly,30)
fit = S.linregress(B[0],B[1])
print fit
pl.plot(lx,ly,'.')
pl.plot(B[0],B[1],'ro')
pl.plot(B[0],B[0]*fit[0] + fit[1],'k-')
pl.xlabel("log10(count rewards)")
pl.ylabel("log10(sum rewards)")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Arrival of new vulnerabilities
Step2: Arrival of New Researchers
Step3: Researcher Arrival following rewards
Step4: Inter-time between 2 awards
|
7,558
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pylab
import numpy as np
import pandas as pd
from sklearn.svm import OneClassSVM
from sklearn.covariance import EllipticEnvelope
pylab.rcParams.update({'font.size': 14})
df = pd.read_csv("AnomalyData.csv")
df.head()
state_code = df["state_code"]
data = df.loc[:, "data science": "Openness"]
param = "modern dance"
qv1 = data[param].quantile(0.25)
qv2 = data[param].quantile(0.5)
qv3 = data[param].quantile(0.75)
qv_limit = 1.5 * (qv3 - qv1)
un_outliers_mask = (data[param] > qv3 + qv_limit) | (data[param] < qv1 - qv_limit)
un_outliers_data = data[param][un_outliers_mask]
un_outliers_name = state_code[un_outliers_mask]
fig = pylab.figure(figsize=(4,6))
ax = fig.add_subplot(1, 1, 1)
for name, y in zip(un_outliers_name, un_outliers_data):
ax.text(1, y, name)
ax.boxplot(data[param])
ax.set_ylabel(param)
params = ["data science", "ceo"]
params_data = np.array([df[params[0]], df[params[1]]]).T
ee = EllipticEnvelope()
ee.fit(params_data)
biv_outliers_mask = ee.predict(params_data) == -1
biv_outliers_data = params_data[biv_outliers_mask]
biv_outliers_name = state_code[biv_outliers_mask]
xx, yy = np.meshgrid(np.linspace(params_data[:, 0].min(), params_data[:, 0].max(), 100),
np.linspace(params_data[:, 1].min(), params_data[:, 1].max(), 100))
zz = ee.decision_function(np.c_[xx.ravel(), yy.ravel()])
zz = zz.reshape(xx.shape)
fig = pylab.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
for name, xy in zip(biv_outliers_name, biv_outliers_data):
ax.text(xy[0], xy[1], name)
ax.contour(xx, yy, zz, levels=[0], linewidths=2)
ax.scatter(params_data[:, 0], params_data[:, 1], color='black')
ax.set_xlabel(params[0])
ax.set_ylabel(params[1])
ocsvm = OneClassSVM(nu=0.25, gamma=0.05)
ocsvm.fit(data)
#
state_code[ocsvm.predict(data) == -1]
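# An optional follow-up (an illustrative addition, not in the original analysis):
# decision_function gives a continuous anomaly score, where more negative values lie
# further outside the boundary learned by the one-class SVM
svm_scores = pd.Series(ocsvm.decision_function(data).ravel(), index=state_code)
svm_scores.sort_values().head()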
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read CSV
Step2: Save state_code to label outliers. "data" contains just quantitative variables.
Step3: Univariate Outliers
Step4: Get quantile values and IQR for outlier limits.
Step5: Get positions of outliers and use state_code for labels.
Step6: Create box plot for "modern dance."
Step7: Bivariate Outliers
Step8: Compute the "elliptical envelope."
Step9: Get the names and positions of outliers.
Step10: Calculate the decision boundary for the scatterplot.
Step11: Draw the scatterplot with the elliptical envelope and label the outliers.
Step12: Multivariate Outliers
Step13: List the names of the outlying states based on the one-class SVM.
|
7,559
|
<ASSISTANT_TASK:>
Python Code:
class A():
pass
a = A() # create an instance of class A
print (a)
print (type(a))
class Human(object):
name = ''
age = 0
human1 = Human() # create instance of Human
human1.name = 'Anton' # name him (add data to this object)
human1.age = 39 # set the age (add data to this object)
print (type(human1))
print (human1.name)
print (human1.age)
class Human(object):
name = ''
age = 0
def __init__(self, name):
self.name = name
h1 = Human('Anton')
print (h1.name)
print (h1.age)
class Human(object):
''' Human being '''
name = ''
age = 0
def __init__(self, name):
''' Create a Human '''
self.name = name
def grow(self):
''' Grow a Human by one year (in-place) '''
self.age += 1
human1 = Human('Adam')
human1.grow()
print (human1.name)
print (human1.age)
class Human(object):
''' Human being '''
name = ''
age = 0
def __init__(self, name):
''' Create a Human '''
self.name = name
def grow(self):
''' Grow a Human by one year (in-place) '''
self.age += 1
def get_name(self):
''' Return name of a Human '''
return self.name
def get_age(self):
''' Return age of a Human '''
return self.age
h1 = Human('Eva')
print (h1.get_name())
class Teacher(Human):
''' Teacher of Python '''
def give_lecture(self):
''' Print lecture on the screen '''
print ('bla bla bla')
t1 = Teacher('Anton')
while t1.get_age() < 50:
t1.grow()
print (t1.get_name())
print (t1.get_age())
t1.give_lecture()
# add directory scripts to PYTHONPATH (searchable path)
import sys
sys.path.append('scripts')
from human_teacher import Teacher
t1 = Teacher('Morten')
t1.give_lecture()
## add scripts to the list of searchable directories
import sys
sys.path.append('scripts')
# import class definition from our module
from ts_profile import Profile
# load data
p = Profile('data/tsprofile.txt')
# work with the object
print (p.get_ts_at_level(5))
print (p.get_ts_at_depth(200))
print (p.get_mixed_layer_depth(.1))
from st_profile import load_profile, get_ts_at_level, get_ts_at_depth
from st_profile import get_mixed_layer_depth, plot_ts
depth, temp, sal = load_profile('tsprofile.txt')
print (get_ts_at_level(depth, temp, sal))
from nansat import Nansat
n = Nansat('satellite_filename.hdf')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use class
Step2: Definition of a class with attributes (properties)
Step3: Definition of a class with constructor
Step4: Create a Human instance and give him a name instantly
Step5: Definition of a class with several methods
Step6: Create a Human, give him a name, grow by one year (in-place)
Step7: Add get_ methods to the class
Step8: Create a class with Inheritance
Step9: Create a Teacher with a name, grow him sufficiently, use him.
Step10: Import class definition from a module
Step11: Practical example
Step12: How would it look without OOP?
Step13: 2. A lot of data to unpack and to pass between functions
Step14: 3. And imagine now we open a satellite image which has
|
7,560
|
<ASSISTANT_TASK:>
Python Code:
from poppy.creatures import PoppyTorso
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy = PoppyTorso(simulator='vrep')
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy.motors
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy.head_z.goto_position(90,1)
# Write your code below and execute it.
# A sample solution is given below for reference:
# to reset the simulation:
poppy.reset_simulation()
# Write your code below and execute it.
# A sample solution is given below for reference:
# to raise the arms to horizontal
poppy.r_shoulder_x.goto_position(-100,1)
poppy.l_shoulder_x.goto_position(100,1)
poppy.r_elbow_y.goto_position(100,1)
poppy.l_elbow_y.goto_position(100,1)
# Write your code below and execute it.
# A sample solution is given below for reference:
# to raise the arms to horizontal
poppy.r_shoulder_x.goto_position(-100,1)
poppy.l_shoulder_x.goto_position(100,1,wait=True)
poppy.r_elbow_y.goto_position(100,1)
poppy.l_elbow_y.goto_position(100,1)
# Write your code below and execute it.
# A sample solution is given below for reference:
# to return the arms to their starting position:
poppy.r_elbow_y.goto_position(0,1)
poppy.l_elbow_y.goto_position(0,1,wait=True)
poppy.r_shoulder_x.goto_position(0,1)
poppy.l_shoulder_x.goto_position(0,1,wait=True)
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy.head_z.goto_position(40,1,wait=True)
poppy.head_z.goto_position(-40,1,wait=True)
poppy.head_z.goto_position(40,1,wait=True)
poppy.head_z.goto_position(-40,1,wait=True)
poppy.head_z.goto_position(0,1,wait=True)
poppy.r_shoulder_x.goto_position(-90,2)
poppy.l_shoulder_x.goto_position(90,2)
poppy.l_arm_z.goto_position(90,2)
poppy.r_arm_z.goto_position(50,2,wait=True)
poppy.r_shoulder_x.goto_position(0,2)
poppy.l_shoulder_x.goto_position(0,2)
poppy.l_arm_z.goto_position(0,2)
poppy.r_arm_z.goto_position(0,2,wait=True)
poppy.r_shoulder_x.goto_position(-90,2)
poppy.l_shoulder_x.goto_position(90,2)
poppy.l_arm_z.goto_position(-50,2)
poppy.r_arm_z.goto_position(-90,2,wait=True)
poppy.r_shoulder_x.goto_position(0,2)
poppy.l_shoulder_x.goto_position(0,2)
poppy.l_arm_z.goto_position(0,2)
poppy.r_arm_z.goto_position(0,2,wait=True)
poppy.l_arm_z.goto_position(90,3)
poppy.r_arm_z.goto_position(-90,3,wait=True)
poppy.r_arm_z.goto_position(0,3)
poppy.l_arm_z.goto_position(0,3,wait=True)
poppy.l_arm_z.goto_position(90,3)
poppy.r_arm_z.goto_position(-90,3,wait=True)
poppy.r_arm_z.goto_position(0,3)
poppy.l_arm_z.goto_position(0,3,wait=True)
poppy.r_shoulder_x.goto_position(-90,3)
poppy.l_shoulder_x.goto_position(90,3,wait=True)
poppy.r_shoulder_y.goto_position(30,3)
poppy.l_shoulder_y.goto_position(-30,3,wait=True)
poppy.r_shoulder_y.goto_position(-30,3)
poppy.l_shoulder_y.goto_position(30,3,wait=True)
for m in poppy.motors :
m.goto_position(0,1)
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy.close()
# Write your code below and execute it.
# A sample solution is given below for reference:
poppy = PoppyTorso()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, you will create an object called poppy, which is a robot of type PoppyTorso. You can give your robot whatever name you like. You just need to write
Step2: Like everything in the Python language, our robot poppy is an object that contains other objects, which are its motors.
Step3: All movements are based on rotations of the motors located at the joints. You simply set the desired angle for a motor. To do this, we can use the method
Step4: Now pick a motor at random from the list of motors obtained earlier and make it move in order to locate it on the robot.
Step5: If your robot no longer responds and you do not understand why, the robot control program or the Jupyter interface may be down; in that case you can reload the programs by choosing Kernel and then Restart in the Jupyter menu. You then have to start again from the beginning of this guide.
Step6: You have probably noticed that the movements of all the motors run at the same time, simultaneously.
Step7: The arms are horizontal; put them back in their starting position, that is, with the motor angles at 0 degrees.
Step8: Now that you know how to make your robot move, be creative and invent a dance for it!
Step9: To finish the simulation, you must stop the robot
Step10: Part two
|
7,561
|
<ASSISTANT_TASK:>
Python Code:
import requests
from scrapy.http import TextResponse
url = "https://www.fragrantica.com/designers/Dolce%26Gabbana.html"
user_agent = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/58.0.3029.110 Chrome/58.0.3029.110 Safari/537.36'}
r = requests.get(url, headers=user_agent)
response = TextResponse(r.url, body=r.text, encoding='utf-8')
# Navigate the perfume list
c = response.xpath('//div[@id="col1"]/div[@class="perfumeslist"]/div/div/p/a//text()').extract()
print("There are {} perfumes from Dolce & Gabanna".format(len(c)))
print(c)
def parse_perfume_data(response):
my_list = []
for row in response.xpath('//div[@id="col1"]/div[@class="perfumeslist"]'):
perfume = {}
perfume['name'] = row.xpath('div/div/p/a//text()').extract()[0]
perfume['year'] = year(row.xpath('div/div/p/span[@class="mtext"]/span/strong/text()').extract())
perfume['gender'] = row.xpath('div/@class').extract()[0].split(' ')[1][6:]
perfume['img'] = row.xpath('div/div/p/a/img//@src').extract()[0]
perfume['url'] = row.xpath('div/div/p/a/@href').extract()[0]
my_list.append(perfume)
return my_list
def year(y):
if len(y) >= 1:
return y[0]
else:
return ''
data = parse_perfume_data(response)
print(data[0])
print(data[1])
from collections import Counter
Counter(token['gender'] for token in data)
Counter(token['year'] for token in data)
years = list(sorted(set([p['year'] for p in data[1:]])))
yTotal = Counter(token['year'] for token in data)
print('Year', 'M', 'F', 'U', 'T')
for y in years:
filtered = list(filter(lambda d: d['year'] == y, data))
count = Counter(token['gender'] for token in filtered)
print(y, count['male'], count['female'], count['unisex'], yTotal[y])
data
# display D&G Perfume Bottles
from IPython.display import Image, HTML, display
from glob import glob
def make_html(image):
return '<img src="{}" style="display:inline;margin:1px"/>'.format(image)
item = ''.join( [make_html(x['img']) for x in data] )
display(HTML(item))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once we have the response, which is a huge chunk of minimized html tags, we need to navigate through the DOM structure to get exactly the information needed. The perfumes are thankfully listed in the tree with the ID #col1, so I can start there as the root and get all the perfume names by picking specific child DOM nodes.
Step2: The extract() method returns a list, so it was easy to get the number of items by finding the length of the list.
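A minimal sketch of these two steps together (the XPath below is a simplified, illustrative version of the more specific one used in the code above):
perfume_names = response.xpath('//div[@id="col1"]//a/text()').extract()  # extract() returns a plain Python list
print(len(perfume_names))  # so counting items is just taking the list's length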
Step3: Now we call this function while passing in the response to get the data, which is a structured extraction from the response. I chose some easy to remember field names to use as dictionary keys.
Step4: Here's how the data looks like after parsing.
Step5: Data Analysis
Step6: Values where,
|
7,562
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
!pip install -U okpy
from client.api.notebook import Notebook
ok = Notebook('lab09.ok')
scandals = pd.read_csv('scandals.csv')
scandals.set_index('scandal_id', inplace=True)
scandals
def display_points(points, xlim, title):
Displays a timeline with points on it.
Args:
points (ndarray): A list of floats in the range [xlim[0], xlim[1]],
each one a point to display in the timeline.
xlim (list-like): A list/tuple/array with 2 elements giving the
start and end of the timeline.
title (str): The name to display on the plot.
fig, ax = plt.subplots(1)
fig.set_size_inches(8, 1)
ax.scatter(points, np.repeat(0, len(points)))
ax.axhline(0, color="grey", zorder=-1, lw=.5)
ax.yaxis.set_visible(False)
ax.xaxis.set_visible(True)
ax.set_xlim(xlim)
ax.set_title("{} ({:d} total points)".format(title, len(points)))
plt.show()
num_months = 24
display_points(scandals['time'], [0, num_months], 'Scandals at Ooober') #SOLUTION
average_scandals_per_month = len(scandals) / num_months #SOLUTION
average_scandals_per_month
_ = ok.grade('q3')
_ = ok.backup()
average_scandals_per_second = average_scandals_per_month / (30*24*60*60) #SOLUTION
average_scandals_per_second
_ = ok.grade('q4')
_ = ok.backup()
p = average_scandals_per_second #SOLUTION
p
_ = ok.grade('q5')
_ = ok.backup()
seconds_in_a_month = 30*24*60*60
q6_simulated_scandals = np.where(np.random.random(size=num_months*seconds_in_a_month) <= p)[0] / seconds_in_a_month #SOLUTION
display_points(q6_simulated_scandals, [0, num_months], "Simulated scandals in 2 years")
q6_simulated_scandals
def draw_approximate_poisson_process(rate, length, steps_per_unit_length):
Draws from an approximate Poisson process on [0, length] with the given rate.
This function simulates the number of times a thing happens, if the
thing happens or doesn't happen randomly according to a Bernoulli
distribution at each time step. The number of time steps simulated
is:
length * steps_per_unit_length.
The average number of things that happen is:
rate * length.
Args:
rate (float): The average number of times the thing happens per
unit length.
length (float): The length of time to be simulated.
steps_per_unit_length (float): The number of Bernoulli draws per unit length.
length*steps_per_unit_length must be an integer.
(That guarantee isn't strictly necessary but
simplifies the implementation of this function.)
Returns:
ndarray: A NumPy array containing the times when the thing happened.
(If a event happened during a time step, this function can
choose any time for the event between the time step and the
next one.)
# We suggest computing the number of steps like this:
num_steps = int(np.round(length * steps_per_unit_length))
p = rate / steps_per_unit_length #SOLUTION
return np.where(np.random.random(size=num_steps) <= p)[0] / steps_per_unit_length #SOLUTION
draw_approximate_poisson_process(average_scandals_per_month, num_months, seconds_in_a_month)
num_simulations = 1000
minutes_in_a_month = 30*24*60
three_or_more_scandals_count = np.count_nonzero([
max(np.bincount(draw_approximate_poisson_process(average_scandals_per_month, num_months, minutes_in_a_month).astype(int))) >= 3
for _ in range(num_simulations)])
three_or_more_scandals_chance = three_or_more_scandals_count / num_simulations
three_or_more_scandals_chance
_ = ok.grade('q8')
_ = ok.backup()
# Example values for r, l, and d so that this cell can run:
r = .2
l = 10
d = 3
n = l*d #SOLUTION
p = r/d #SOLUTION
_ = ok.grade('q9')
_ = ok.backup()
def plot_poisson_approximation(r, l, num_step_sizes, num_simulations):
import math
max_output = int(r*l + 6*(r*l)**0.5)
true_poisson_pmf = [math.exp(-r*l) * (r*l)**k / math.factorial(k) for k in range(max_output)]
min_steps_per_length = r*2
steps_per_length = min_steps_per_length * 2**np.arange(num_step_sizes)
def approximate_pmf(s):
draws = [len(draw_approximate_poisson_process(r, l, s)) for _ in range(num_simulations)]
return np.bincount(draws, minlength=max_output) / num_simulations
approximate_pmfs = [approximate_pmf(s) for s in steps_per_length]
approximate_pmf_names = ["Approximation to Poisson PMF (Binomial({:d}, {:.4f}))".format(int(np.round(l*s)), r/s)
for s in steps_per_length]
from matplotlib import cm
colors = [cm.jet(x) for x in np.linspace(0, 1, len(steps_per_length)+1)]
for pmf, name, color in zip(approximate_pmfs + [true_poisson_pmf], approximate_pmf_names + ["True Poisson({:.4f}) PMF".format(r*l)], colors):
plt.scatter(range(len(pmf)), pmf, c=color, label=name, s=40)
plt.plot(range(len(pmf)), pmf, c=color, linestyle='dashed')
plt.legend()
plt.title("Approximations to the Poisson distribution\n(dashed lines are for contrast purposes only)")
plt.xlabel("Count")
plt.ylabel("Probability")
plot_poisson_approximation(.2, 10, 5, 40000)
data = pd.read_csv("least_squares_data.csv")
data.head()
def display_linear_prediction(data, theta=None, orient_x = 45, orient_y = 45):
import math
from matplotlib import patches, cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
plot_predictions = theta is not None
fig = plt.figure()
ax = fig.gca(projection='3d')
if plot_predictions:
actual_xs = data[['x0', 'x1']].as_matrix()
predicted_ys = np.dot(actual_xs, theta)
def plot_points_and_residuals(point_indicators, ax, occluded):
to_plot = data.loc[point_indicators]
ax.hold(True)
ax.scatter(to_plot["x0"], to_plot["x1"], to_plot["y"],
c='black',
zorder=-1 if occluded else 100)
for i in np.where(point_indicators)[0]:
x0, x1 = actual_xs[i,:]
y = data['y'][i]
predicted_y = predicted_ys[i]
ax.hold(True)
ax.plot([x0, x0], [x1, x1], [y, predicted_y],
c='red',
linestyle='dotted' if occluded else 'solid',
lw=1 if occluded else 2)
# Figuring out which points are in front of the surface:
orient_x_rad = orient_x*math.pi/180
orient_y_rad = orient_y*math.pi/180
viewpoint_coords = [
np.sin(orient_x_rad)*np.cos(orient_y_rad),
np.cos(orient_x_rad)*np.cos(orient_y_rad),
np.sin(orient_y_rad)]
prediction_surface_normal = [theta[0], theta[1], -1]
viewpoint_above_surface = np.dot(viewpoint_coords, prediction_surface_normal) >= 0
point_in_front_of_surface = ((predicted_ys - data['y']) >= 0) == viewpoint_above_surface
plot_points_and_residuals(~point_in_front_of_surface, ax, True)
# Plotting the surface:
xs = np.array(np.meshgrid(
np.linspace(min(data['x0']), max(data['x0']), 5),
np.linspace(min(data['x1']), max(data['x1']), 5)))
ys = np.tensordot(theta, xs, axes=1)
ax.hold(True)
prediction_plane = ax.plot_surface(xs[0], xs[1], ys,
cmap=cm.coolwarm)
plot_points_and_residuals(point_in_front_of_surface, ax, False)
squared_loss = np.mean((predicted_ys - data['y'])**2)
plt.title("data, predictions, and residuals\n(current average squared prediction error = {:.2f})".format(squared_loss))
else:
prediction_plane = ax.scatter(data["x0"], data["x1"], data["y"],
cmap=cm.coolwarm)
plt.title("raw data")
ax.zaxis.set_major_locator(LinearLocator(5))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.1f'))
ax.view_init(orient_y, orient_x)
ax.set_xlabel("x0 (feature 0)")
ax.set_ylabel("x1 (feature 1)")
ax.set_zlabel("y (the thing we're predicting)")
plt.show()
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
def plot_predictor(theta0, theta1, orient_x, orient_y, plot_predictions):
theta = [theta0, theta1]
display_linear_prediction(data, theta if plot_predictions else None, orient_x, orient_y)
theta0 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
theta1 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
orient_x = widgets.FloatSlider(min=0, max=90, step=1, value=45, description="x rotation")
orient_y = widgets.FloatSlider(min=0, max=90, step=1, value=45, description="y rotation")
plot_predictions = widgets.Checkbox(value=True, description="Plot predictions on top of data?")
interact(plot_predictor, theta0=theta0, theta1=theta1, orient_x=orient_x, orient_y=orient_y, plot_predictions=plot_predictions);
def least_squares_loss(theta):
The average squared prediction error when the function
f: x => theta . x
is used to predict y for our dataset.
The dataset is the DataFrame named data.
Args:
theta (ndarray): A vector of p numbers. The prediction function
is f: x => theta . x, the dot product with theta.
Returns:
float: The average (over the DataFrame named data) of the
squared prediction error.
# Our solution defined an array called predictions; you
# don't have to.
predictions = np.dot(data[['x0', 'x1']].as_matrix(), theta) #SOLUTION
return np.mean((data['y'] - predictions)**2) #SOLUTION
_ = ok.grade('q11')
_ = ok.backup()
import scipy.optimize as scopt
optimization_result = scopt.minimize(least_squares_loss, x0=np.random.normal(0, 1, size=2)) #SOLUTION
optimization_result
_ = ok.grade('q12')
_ = ok.backup()
import math  # math.ceil is used a few lines below; assumed to come from an earlier cell of the original notebook
windowsize = 5
maxloss = least_squares_loss([windowsize, -windowsize])
losses_flattened = pd.DataFrame.from_records(
[(t0, t1)
for t0 in np.linspace(-windowsize, windowsize, 30)
for t1 in np.linspace(-windowsize, windowsize, 30)
for _ in range(int(math.ceil((maxloss-least_squares_loss([t0, t1]))/(maxloss/10))))],
columns=["theta0", "theta1"])
def display_loss_heatmap(theta):
sns.kdeplot(losses_flattened['theta0'], losses_flattened['theta1'], shade=True, n_levels=100, color='red')
plt.scatter(theta[0], theta[1], color='orange', s=100, label="proposed theta")
plt.xlim([-windowsize, windowsize])
plt.ylim([-windowsize, windowsize])
plt.gca().set_aspect('equal', adjustable='box')
plt.xlabel("theta0")
plt.ylabel("theta1")
plt.title("average squared prediction error by theta\n(darker is lower)")
plt.legend()
def plot_loss(theta0, theta1):
theta = [theta0, theta1]
display_loss_heatmap(theta)
theta0 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
theta1 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
orient_x = widgets.FloatSlider(min=0, max=90, step=1, value=45, description="x rotation")
orient_y = widgets.FloatSlider(min=0, max=90, step=1, value=45, description="y rotation")
plot_predictions = widgets.Checkbox(value=True, description="Plot predictions on top of data?")
interact(plot_predictor, theta0=theta0, theta1=theta1, orient_x=orient_x, orient_y=orient_y, plot_predictions=plot_predictions);
theta0 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
theta1 = widgets.FloatSlider(min=-3, max=3, step=.1, value=0)
interact(plot_loss, theta0=theta0, theta1=theta1);
i_finished_the_lab = False
_ = ok.grade('qcompleted')
_ = ok.backup()
_ = ok.submit()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Today's lab has two topics
Step3: The following cell defines a function for viewing timelines of events.
Step4: Question 2
Step5: Question 3
Step6: Our goal is to simulate possible future scandal patterns. To do that, we need a model for the scandals, and we need to learn the parameters of that model from our data.
Step7: It seems okay to neglect the chance that there are 2 scandals in one second! In that case, whether a scandal happens in a particular second is a Bernoulli($p$) random variable
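A minimal sanity check of this per-second Bernoulli view, assuming the average_scandals_per_second and scandals objects computed in the code above: the expected count over the 24 observed months should recover the observed number of scandals.
expected_count = average_scandals_per_second * (24 * 30 * 24 * 60 * 60)  # p times the number of seconds in 24 months
print(expected_count, len(scandals))  # the two should match (up to floating-point error)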
Step8: Question 6
Step10: Question 7
Step11: Question 8
Step12: The Poisson distribution
Step13: As d increases, the time steps get narrower, and the Binomial(l*d, r/d) distribution becomes the same as the Poisson(l*r) distribution. In homework 5, you'll see another way to simulate data from the Poisson process that takes advantage of this fact. The cell below uses your draw_approximate_poisson_process function to show by simulation that
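A short check of this Binomial-to-Poisson convergence using scipy.stats (an assumed extra import, not used elsewhere in this excerpt); the values of r, l and d below are purely illustrative.
from scipy.stats import binom, poisson
r, l, d = 0.2, 10, 1000  # rate, length, steps per unit length
print(binom.pmf(2, int(l * d), r / d))  # P(count = 2) under the Binomial(l*d, r/d) approximation
print(poisson.pmf(2, r * l))  # P(count = 2) under the Poisson(r*l) limit; the two values should be very close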
Step14: Least squares
Step16: Question 10
Step17: Question 12
Step18: Question 13
Step19: Submitting your assignment
|
7,563
|
<ASSISTANT_TASK:>
Python Code:
import os
import glob
import time
from SimpleCV import *
import scipy
import numpy as np
import tensorflow as tf
import collections
import matplotlib.pyplot as plt
import cv2
import imutils
from skimage.transform import pyramid_gaussian
import argparse
import cv2
from scipy import ndimage
from scipy.ndimage import sum as ndi_sum
from subprocess import check_output
from skimage.transform import pyramid_gaussian
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
%matplotlib inline
################################## Functions definition ###################################
#These functions are inspired from http://www.pyimagesearch.com/
def pyramid(image, scale=1.5, minSize=(30, 30)):
# yield the original image
yield image
# keep looping over the pyramid
while True:
# compute the new dimensions of the image and resize it
w = int(image.shape[1] / scale)
image = imutils.resize(image, width=w)
# if the resized image does not meet the supplied minimum
# size, then stop constructing the pyramid
if image.shape[0] < minSize[1] or image.shape[1] < minSize[0]:
break
# yield the next image in the pyramid
yield image
def sliding_window(image, stepSize, windowSize):
# slide a window across the image
for y in xrange(0, image.shape[0], stepSize):
for x in xrange(0, image.shape[1], stepSize):
# yield the current window
yield (x, y, image[y:y + windowSize[1], x:x + windowSize[0]])
# lenet 5
def findHOGFeatures(self, n_divs=3, n_bins=6):
**SUMMARY**
Get HOG(Histogram of Oriented Gradients) features from the image.
**PARAMETERS**
* *n_divs* - the number of divisions(cells).
* *n_divs* - the number of orientation bins.
**RETURNS**
Returns the HOG vector in a numpy array
n_HOG = n_divs * n_divs * n_bins # Size of HOG vector
HOG = np.zeros((n_HOG, 1)) # Initialize output HOG vector
# Apply sobel on image to find x and y orientations of the image
Icv = self.getNumpyCv2()
Ix = cv2.Sobel(Icv, ddepth=cv.CV_32F, dx=1, dy=0, ksize=3)
Iy = cv2.Sobel(Icv, ddepth=cv.CV_32F, dx=0, dy=1, ksize=3)
Ix = Ix.transpose(1, 0, 2)
Iy = Iy.transpose(1, 0, 2)
cellx = self.width / n_divs # width of each cell(division)
celly = self.height / n_divs # height of each cell(division)
# Area of image
img_area = self.height * self.width
#Range of each bin
BIN_RANGE = (2 * pi) / n_bins
angles = np.arctan2(Iy, Ix)
magnit = ((Ix ** 2) + (Iy ** 2)) ** 0.5
height, width = self.height, self.width
bins = (angles[...,0] % (2 * pi) / BIN_RANGE).astype(int)
x, y = np.mgrid[:width, :height]
x = x * n_divs // width
y = y * n_divs // height
labels = (x * n_divs + y) * n_bins + bins
index = np.arange(n_HOG)
HOG = ndi_sum(magnit[..., 0], labels, index)
return HOG / (height*width)
#Define some values and constants
fish_classes = ['ALB','BET','LAG','DOL','SHARK','YFT','NoF']
fish_classes_test = ['Fish','NoFish']
number_classes = len(fish_classes)
main_path_train = '../train_cut_oversample'
main_path_test = '../test'
extension = "*.jpg"
ROWS_RESIZE, COLS_RESIZE = 100, 100  # output frame size; matches the (winW, winH) sliding window and the shape check used below
############################## Get HOG of fish and No-fish cases ###################################
#One array per classifier
HOG = []
HOG_n = []
HOG_ALB = []
HOG_BET = []
HOG_DOL = []
HOG_LAG = []
HOG_SHARK = []
HOG_YFT = []
#Construct arrays
for classes in fish_classes:
#Acces the files
path_class = os.path.join(main_path_train,classes)
directory = os.path.join(path_class, extension)
files = glob.glob(directory)
for file in files:
new_img = cv2.imread(file)
H = findHOGFeatures(Image(new_img))
if classes != 'NoF':
HOG.append(H)
if classes == 'ALB':
HOG_ALB.append(H)
if classes == 'BET':
HOG_BET.append(H)
if classes == 'DOL':
HOG_DOL.append(H)
if classes == 'LAG':
HOG_LAG.append(H)
if classes == 'SHARK':
HOG_SHARK.append(H)
if classes == 'YFT':
HOG_YFT.append(H)
else:
HOG_n.append(H)
HOG = np.array(HOG)
HOG_ALB = np.array(HOG_ALB)
HOG_BET = np.array(HOG_BET)
HOG_DOL = np.array(HOG_DOL)
HOG_LAG = np.array(HOG_LAG)
HOG_SHARK = np.array(HOG_SHARK)
HOG_YFT = np.array(HOG_YFT)
HOG_n = np.array(HOG_n)
#Print shapes of the arrays
print HOG.shape
print HOG_ALB.shape
print HOG_BET.shape
print HOG_DOL.shape
print HOG_LAG.shape
print HOG_SHARK.shape
print HOG_YFT.shape
print HOG_n.shape
############################## Build and train the classifiers ###################################
#SVM with all classes against No Fish
X = np.concatenate((HOG, HOG_n),axis = 0)
class_one = np.ones(HOG.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_all = SVC(probability=True)
clf_all.fit(X, y)
#SVM: ALB vs No Fish
X = np.concatenate((HOG_ALB, HOG_n),axis = 0)
class_one = np.ones(HOG_ALB.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_ALB = SVC(probability=True)
clf_ALB.fit(X,y)
#SVM: BET vs No Fish
X = np.concatenate((HOG_BET, HOG_n),axis = 0)
class_one = np.ones(HOG_BET.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_BET = SVC(probability=True)
clf_BET.fit(X,y)
#SVM: DOL vs No Fish
X = np.concatenate((HOG_DOL, HOG_n),axis = 0)
class_one = np.ones(HOG_DOL.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_DOL = SVC(probability=True)
clf_DOL.fit(X,y)
#SVM: LAG vs No Fish
X = np.concatenate((HOG_LAG, HOG_n),axis = 0)
class_one = np.ones(HOG_LAG.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_LAG = SVC(probability=True)
clf_LAG.fit(X,y)
#SVM: SHARK vs No Fish
X = np.concatenate((HOG_SHARK, HOG_n),axis = 0)
class_one = np.ones(HOG_SHARK.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_SHARK = SVC(probability=True)
clf_SHARK.fit(X,y)
#SVM: YFT vs No Fish
X = np.concatenate((HOG_YFT, HOG_n),axis = 0)
class_one = np.ones(HOG_YFT.shape[0])
class_zero = np.zeros(HOG_n.shape[0])
y = np.concatenate((class_one, class_zero), axis=0)
clf_YFT = SVC(probability=True)
clf_YFT.fit(X,y)
###################################### Apply 6 classifiers (buffer) ##################################
(winW, winH) = (100, 100)
#Apply classifier on test
directory = os.path.join(main_path_test, extension)
files = glob.glob(directory)
extension = "*.jpg"
for classes in fish_classes:
path_class = os.path.join(main_path_test,classes)
directory = os.path.join(path_class, extension)
files = glob.glob(directory)
for file in files:
image = cv2.imread(file)
prob_ALB = 0
prob_BET = 0
prob_DOL = 0
prob_LAG = 0
prob_SHARK = 0
prob_YFT = 0
# loop over the image pyramid
for resized in pyramid(image, scale=1.5):
# loop over the sliding window for each layer of the pyramid
for (x, y, window) in sliding_window(resized, stepSize=64, windowSize=(winW, winH)):
# if the window does not meet our desired window size, ignore it
if window.shape[0] != winH or window.shape[1] != winW:
continue
H = findHOGFeatures(Image(window))
#Predict probability for each class
p_ALB = clf_ALB.predict_proba([H])
p_BET = clf_BET.predict_proba([H])
p_DOL = clf_DOL.predict_proba([H])
p_LAG = clf_LAG.predict_proba([H])
p_SHARK = clf_SHARK.predict_proba([H])
p_YFT = clf_YFT.predict_proba([H])
#Store frame with the highest probability per class
if prob_ALB < p_ALB[0,1]:
prob_ALB = p_ALB[0,1]
wind_ALB = window
if prob_BET< p_BET[0,1]:
prob_BET = p_BET[0,1]
wind_BET = window
if prob_DOL<p_DOL[0,1]:
prob_DOL = p_DOL[0,1]
wind_DOL = window
if prob_LAG<p_LAG[0,1]:
prob_LAG = p_LAG[0,1]
wind_LAG = window
if prob_SHARK<p_SHARK[0,1]:
prob_SHARK = p_SHARK[0,1]
wind_SHARK = window
if prob_YFT<p_YFT[0,1]:
prob_YFT = p_YFT[0,1]
wind_YFT = window
j = 0
for wind in [wind_ALB,wind_BET,wind_DOL,wind_LAG,wind_SHARK,wind_YFT] :
f = str(os.path.basename(file))
cv2.imwrite("buffer/"+str(classes)+"/"+f[:-4]+"_"+str(j)+"0.jpg", wind)
j = j+1
###################################### Apply 1 classifier (fish_detected) ##################################
#from PIL import Image
path = "buffer/"
extension2 = "*_00.jpg"
nam = ""
directory = os.path.join(path, extension)
files = glob.glob(directory)
for classes in fish_classes:
#Access folders
path_class = os.path.join(path,classes)
directory = os.path.join(path_class, extension2)
files = glob.glob(directory)
for file in files:
prob_fish = 0
f = str(os.path.basename(file))
#Access files
ext = f[:-6]+"*.jpg"
direct = os.path.join(path_class, ext)
for name in glob.glob(direct):
#Open image
img = cv2.imread(name)
if img.shape == (100,100,3): #Check that the image generated by the slidding window has the right size
#Predict probabilities
H = findHOGFeatures(Image(img))
aux = clf_all.predict_proba([H])
#Store highest probability frame
if prob_fish < aux[0,1]:
prob_fish = aux[0,1]
img = np.reshape(img, (ROWS_RESIZE, COLS_RESIZE,3))
img_save = img
nam = name
#Save frame
cv2.imwrite("fish_detected/"+str(classes)+"/"+str(os.path.basename(nam)), img_save)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: After importing the libraries, we declare the functions needed for fish detection. As introduced above, we need a sliding window and a classifier to determine the probability of a frame containing a fish.
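For reference, the feature length that the SVM classifiers below consume follows directly from the findHOGFeatures defaults shown in the code above:
n_divs, n_bins = 3, 6  # default arguments of findHOGFeatures
print(n_divs * n_divs * n_bins)  # each 100x100 window is summarised by a 54-dimensional HOG vector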
Step3: The last definitions to be made are the constant parameters used throughout this notebook. Due to the unbalanced amount of data, we oversample the minority classes (BET, DOL, LAG and SHARK).
Step4: Next, we generate the HOG arrays for all seven classifiers
Step5: As already explained, the output of the following code is six frames per image, stored in a folder called "buffer". The fact that the test data is organized in classes influences neither the detection nor the classification; it just helps us check the results.
Step6: Finally, we apply the "Fish" vs "No Fish" SVM to select the image most similar to a fish and store it in a folder called "fish_detected"
|
7,564
|
<ASSISTANT_TASK:>
Python Code:
import requests
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import time
import os
import getpass
# Directive to matplotlib for creating interactive graphs
# Use %matplotlib inline for just creating the plots
%matplotlib notebook
# Gaia Archive REST URL
gacs_url = 'https://gea.esac.esa.int/tap-server/'
# URL For synchronous queries
gacs_sync_url = gacs_url + 'tap/sync'
# URL For asynchronous queries
gacs_async_url = gacs_url + 'tap/async'
# Login URL
gacs_login_url = gacs_url + 'login'
# Delete JOBS URL
gacs_delete_url = gacs_url + 'tap/deletejobs'
# HTTP Session object
session = requests.Session()
# Ask for the password (COSMOS)
password = getpass.getpass('Enter cosmos password:')
# POST the authentication
resp = session.post(gacs_login_url,data = {'username':'edelpozo','password':password})
def execute_query(query,results_format='csv',delete_job=True):
global gacs_async_url
global gacs_delete_url
global session
# Submit the Job to Gacs user space
gacs_query = {'PHASE':'RUN','REQUEST':'doQuery','LANG':'ADQL','FORMAT':results_format,'QUERY':query}
start = time.time()
resp = session.post(gacs_async_url,params=gacs_query)
if resp.status_code != 200:
raise Exception('Error launching the job ... ', query , resp.text)
url_job = resp.url
print('Submitted job to GACS: Job URL=' + url_job)
# Poll the server till the Query is finished
while True:
resp = session.get(url_job)
if resp.status_code != 200:
raise Exception('Error checking job status ... ', url_job , resp.text)
if resp.text.find('COMPLETED') >= 0:
break;
if resp.text.find('ERROR') >= 0:
raise Exception('Error in the query exitting ... ', url_job , resp.text)
time.sleep(0.2)
# Get results
results = session.get(url_job + '/results/result')
if results.status_code != 200:
raise Exception('Error launching the job ... ', query , results.text)
stop = time.time()
print('Query time: ' + str(stop-start) + ' seconds')
# Delete the Job from the Gaia Archive
if delete_job:
job_id = url_job.split('/')[-1]
session.post(gacs_delete_url,data ={'JOB_IDS':job_id})
if resp.status_code != 200:
raise Exception('Error deleting the job ... ', job_id , resp.text)
return results
# TEST the function
resp = execute_query('SELECT TOP 5 source_id,ra,dec FROM gaiadr1.gaia_source')
# Define the query .... getting the parallax from tgas and hipparcos catalogue, apparent magnitude
# and hipparcos b_v parameter.
select = 'SELECT gaia.parallax,hip.plx,gaia.phot_g_mean_mag,hip.b_v '
tables = ' FROM gaiadr1.tgas_source as gaia INNER JOIN public.hipparcos_newreduction as hip on gaia.hip = hip.hip'
where = (' where gaia.parallax/gaia.parallax_error >= 5 and '
'hip.plx/hip.e_plx >= 5 and '
'hip.e_b_v > 0.0 and hip.e_b_v <= 0.05 and '
'2.5/log(10)*gaia.phot_g_mean_flux_error/gaia.phot_g_mean_flux <= 0.05')
gacs_query = select + tables + where
# Run the query
resp = execute_query(gacs_query)
# Load the information from the request results in CSV
gaiaplx, hipplx, ap_gmag, hip_bv = np.loadtxt(resp.text.split('\n'),skiprows=1,delimiter=',',unpack=True)
# Function for computing the absolute magnitude
def abs_mag(parallax,apparent_mag):
return apparent_mag+5*np.log10(np.abs(parallax))-10
# Compute the absolute magnitude for Gaia
abs_mag_gaia = abs_mag(gaiaplx,ap_gmag)
# Plot the
plt.hexbin(hip_bv, abs_mag_gaia, gridsize = 300,cmap=cm.jet, bins='log')
#plt.scatter(hip_bv,abs_mag_gaia, c=heatmap , s=10, cmap=cm.hot , lw=0)
plt.gca().invert_yaxis()
plt.colorbar()
plt.show()
# Compute the absolute magnitude for Hipparcos
abs_mag_hip = abs_mag(hipplx,ap_gmag)
# Plot the
plt.hexbin(hip_bv, abs_mag_hip, gridsize = 300,cmap=cm.jet, bins='log')
#plt.scatter(hip_bv,abs_mag_gaia, c=heatmap , s=10, cmap=cm.hot , lw=0)
plt.gca().invert_yaxis()
plt.colorbar()
plt.show()
hist, xedges, yedges = np.histogram2d(hip_bv,abs_mag_gaia, bins=300)
xidx = np.clip(np.digitize(hip_bv, xedges), 0, hist.shape[0]-1)
yidx = np.clip(np.digitize(abs_mag_gaia, yedges), 0, hist.shape[1]-1)
c = hist[xidx, yidx]
plt.gca().invert_yaxis()
plt.scatter(hip_bv,abs_mag_gaia, c=np.log(c), s=2,cmap=cm.jet, lw=0)
plt.colorbar()
plt.show()
select = "SELECT gaia.pmra,gaia.pmdec,gaia.phot_g_mean_mag,gaia.ra,gaia.dec"
tables = " FROM gaiadr1.tgas_source as gaia INNER JOIN public.hipparcos_newreduction as hip on gaia.hip = hip.hip"
where = (" WHERE 1=CONTAINS(POINT('ICRS',gaia.l,gaia.b), "
"CIRCLE('ICRS',-80.33880679040755,-33.99943305252677,3.1))"
" AND gaia.source_id IN "
"(4655349652394811136,4655510043652327552,4655460771785226880,4661769941306044416,"
"4661720532007512320,4655136518933846784,4655158131209278464,4662293892954562048,"
"4654621500815442816,4655036841335115392,4661920986713556352,4661472145451256576,"
"4662061311885050624,4651629489160555392,4658269336800428672,4658204053297963392,"
"4658137739001073280,4660601607121368704,4660444926713007872,4658486455992620416,"
"4660175580731856128,4660124762671796096,4660246224352015232,4657280635327480832,"
"4657700408260606592,4657627943562907520,4657722879521554176,4659188769038018816,"
"4659091084305723392)")
gacs_query = select + tables + where
resp = execute_query(gacs_query)
# Load the information from the request results
pm_ra,pm_dec,gmag, ra, dec = np.loadtxt(resp.text.split('\n'),skiprows=1,delimiter=',',unpack=True)
# GMAG ...
area = np.linspace(np.min(gmag), np.max(gmag), 10)
# Show the scatter plot
plt.scatter(ra,dec, c=gmag,s=area**2, cmap=cm.YlOrRd, lw=0)
plt.title('LMC with TGAS + Hipparcos')
plt.xlabel('RA')
plt.ylabel('DEC')
pm_ra = pm_ra - np.mean(pm_ra)
pm_dec = pm_dec - np.mean(pm_dec)
# Plot the arrows ...
plt.quiver(ra,dec,pm_ra,pm_dec,width=0.004,alpha=0.5,color='Black')
plt.gca().invert_xaxis()
plt.gca().set_xlim([60.0,100.0])
plt.gca().set_ylim([-74,-64.5])
plt.show()
pleiades_center_dec = 56.75
pleiades_center_ra = 24.12
select = "SELECT gaia.pmra,gaia.pmdec,gaia.phot_g_mean_mag,gaia.ra,gaia.dec"
tables = " FROM gaiadr1.tgas_source as gaia"
where = (" WHERE 1=CONTAINS(POINT('ICRS',gaia.ra,gaia.dec), "
"CIRCLE('ICRS'," + str(pleiades_center_ra) + "," + str(pleiades_center_dec) + ",5))"
" and sqrt(power(gaia.pmra-20.5,2)+power(gaia.pmdec+45.5,2)) < 20.0")
gacs_query = select + tables + where
resp = execute_query(gacs_query)
# Load the information from the request results
pm_ra,pm_dec,gmag, ra, dec = np.loadtxt(resp.text.split('\n'),skiprows=1,delimiter=',',unpack=True)
area = np.linspace(np.min(gmag), np.max(gmag), 10)
val = plt.scatter(ra,dec, c=gmag,s=area**2, cmap=cm.YlOrRd, lw=0)
plt.title('Pleiades proper motion')
plt.xlabel('RA')
plt.ylabel('DEC')
cbar = plt.colorbar(val)
cbar.ax.set_ylabel('gmag')
pm_ra = pm_ra - np.mean(pm_ra)
pm_dec = pm_dec - np.mean(pm_dec)
plt.quiver(ra,dec,pm_ra,pm_dec,width=0.004,alpha=0.5,color='Black')
plt.gca().invert_xaxis()
plt.gca().invert_yaxis()
plt.show()
barnard_center_dec = 4.66828815
barnard_center_ra = 269.45402305
select = "SELECT hip.pmra,hip.pmde,hip.hpmag,hip.ra,hip.de"
tables = " FROM public.hipparcos as hip"
where = (" WHERE 1=CONTAINS(POINT('ICRS',hip.ra,hip.de), "
"CIRCLE('ICRS'," + str(barnard_center_ra) + "," + str(barnard_center_dec) + ",0.466666))")
gacs_query = select + tables + where
resp = execute_query(gacs_query)
# Load the information from the request results
pm_ra,pm_dec,gmag, ra, dec = np.loadtxt(resp.text.split('\n'),skiprows=1,delimiter=',',unpack=True)
area = np.linspace(np.min(gmag), np.max(gmag), 10)
val = plt.scatter(ra,dec, c=gmag,s=area**3, cmap=cm.YlOrRd, lw=0)
plt.title("Barnard's Star proper motion")
plt.xlabel('RA')
plt.ylabel('DEC')
cbar = plt.colorbar(val)
cbar.ax.set_ylabel('gmag')
pm_ra = pm_ra - np.mean(pm_ra)
pm_dec = pm_dec - np.mean(pm_dec)
plt.quiver(ra,dec,pm_ra,pm_dec,width=0.003,alpha=0.5,color='Green')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Log in to GACS
Step2: Create a function for executing asynchronous queries
Step3: Python
Step4: Define a function for computing the absolute magnitude
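For reference, the expression used by abs_mag in the code above follows from the distance modulus M = m - 5*log10(d_pc) + 5 with d_pc = 1000/parallax (parallax in milliarcseconds), which simplifies to M = m + 5*log10(parallax) - 10. A quick check with purely illustrative numbers:
import numpy as np
m, plx = 10.0, 5.0  # apparent magnitude, parallax in mas (illustrative values)
print(m - 5 * np.log10(1000.0 / plx) + 5)  # distance-modulus form
print(m + 5 * np.log10(plx) - 10)  # form used by abs_mag above; the two agree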
Step5: Plot the scatter plot for Gaia Abs Mag vs B_V
Step6: Plot the scatter plot for Hipparcos Abs Mag vs B_V
Step7: Python
Step8: Plot the Proper Motion diagram
Step9: Create Proper Motion diagram (Pleiades)
Step10: Create Proper Motion diagram (Barnard's Star)
|
7,565
|
<ASSISTANT_TASK:>
Python Code:
from sqlitedict import SqliteDict
def harness(key, value):
this tests what can be assigned in SqliteDict's keys and values
mydict = SqliteDict(":memory:")
mydict[key] = value
from battle_tested import fuzz, success_map, crash_map
fuzz(harness, keep_testing=True) # keep testing allows us to collect "all" crashes
crash_map()
success_map()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using battle_tested to feel out new libraries.
Step2: Now, we import the tools we need from battle_tested and fuzz it.
Step3: Now we can call success_map() and crash_map() to start to get a feel for what is accepted and what isn't.
|
7,566
|
<ASSISTANT_TASK:>
Python Code:
from rmtk.vulnerability.model_generator.point_dispersion import point_dispersion as pd
from rmtk.vulnerability.common import utils
%matplotlib inline
Sa_means = [0.40, 0.40, 0.40, 0.40]
Sa_covs = [0.20, 0.20, 0.20, 0.20]
Sd_means = [0.03, 0.05, 0.08, 0.1]
Sd_covs = [0.20, 0.20, 0.20, 0.20]
distribution = "normal"
Sa_corr = 0.99999
Sd_corr = 0.99999
Sa_Sd_corr = 0.5
truncation_level = 1
no_capacity_curves = 50
capacity_curves = pd.generate_capacity_curves(Sa_means, Sa_covs, Sd_means, Sd_covs,
distribution, no_capacity_curves,
Sa_corr, Sd_corr, Sa_Sd_corr, truncation_level)
utils.plot_capacity_curves(capacity_curves)
gamma = 1.2
height = 3.0
elastic_period = 0.6
yielding_point_index = 1
capacity_curves = utils.add_information(capacity_curves, 'gamma', 'value', gamma)
capacity_curves = utils.add_information(capacity_curves, 'heights', 'value', height)
capacity_curves = utils.add_information(capacity_curves, 'periods', 'calculate', 1)
capacity_curves = utils.add_information(capacity_curves, 'yielding point', 'point', yielding_point_index)
output_file = "../../../../../rmtk_data/capacity_curves_point.csv"
utils.save_SdSa_capacity_curves(capacity_curves, output_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Capacity curve generator
Step2: Include additional information
Step3: Save capacity curves
|
7,567
|
<ASSISTANT_TASK:>
Python Code:
a = int(input("Enter first integer: "))
b = int(input("Enter second integer: "))
print("Result is :", format((a/b), '.2f'))
a = float(input("Enter first float: "))
b = float(input("Enter second float: "))
print("Result is :", format((a/b), '.6f'))
inp = input("Enter a upper or lower case letter: ")
print("Here is the ASCII encoding of your number: " + str(ord(inp)))
print(format((sum([2**i for i in range(64)])/7000), '.1f'))
age = 20
total_breaths = 45*60*24*365
age -= 1
total_breaths += 25*60*24*365*3
age -= 3
total_breaths += 20*60*24*365*9
age -= 9
total_breaths += 16*60*24*365*7
heart_beats = 67.5*60*60*24*365*20
print("20 years old person had breath approximately: ", total_breaths , " times")
print("20 years old person had heartbeats approximately: ", heart_beats , " times")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: P2
Step2: P3
Step3: Development Problems
Step4: D2
|
7,568
|
<ASSISTANT_TASK:>
Python Code:
# import the packages to be used
import pandas as pd
import pandas_datareader.data as web
import numpy as np
from sklearn.cluster import KMeans
import datetime
from datetime import datetime
import scipy.stats as stats
import scipy as sp
import scipy.optimize as optimize
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# some options for Python
pd.set_option('display.notebook_repr_html', True)
pd.set_option('display.max_columns', 6)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 78)
pd.set_option('precision', 3)
def get_historical_closes(ticker, start_date, end_date):
p = web.DataReader(ticker, "yahoo", start_date, end_date).sort_index('major_axis')
d = p.to_frame()['Adj Close'].reset_index()
d.rename(columns={'minor': 'Ticker', 'Adj Close': 'Close'}, inplace=True)
pivoted = d.pivot(index='Date', columns='Ticker')
pivoted.columns = pivoted.columns.droplevel(0)
return pivoted
data=get_historical_closes(['AA','AAPL','AMZN','MSFT','KO','NVDA', '^GSPC'], '2011-01-01', '2016-12-31')
closes=data[['AA','AAPL','AMZN','MSFT','KO','NVDA']]
sp=data[['^GSPC']]
closes.plot(figsize=(8,6));
def calc_daily_returns(closes):
return np.log(closes/closes.shift(1))[1:]
daily_returns=calc_daily_returns(closes)
daily_returns.plot(figsize=(8,6));
daily_returns.corr()
def calc_annual_returns(daily_returns):
grouped = np.exp(daily_returns.groupby(lambda date: date.year).sum())-1
return grouped
annual_returns = calc_annual_returns(daily_returns)
annual_returns
def calc_portfolio_var(returns, weights=None):
if (weights is None):
weights = np.ones(returns.columns.size)/returns.columns.size
sigma = np.cov(returns.T,ddof=0)
var = (weights * sigma * weights.T).sum()
return var
calc_portfolio_var(annual_returns)
def sharpe_ratio(returns, weights = None, risk_free_rate = 0.015):
n = returns.columns.size
if weights is None: weights = np.ones(n)/n
var = calc_portfolio_var(returns, weights)
means = returns.mean()
return (means.dot(weights) - risk_free_rate)/np.sqrt(var)
sharpe_ratio(annual_returns)
daily_returns_mean=daily_returns.mean()
daily_returns_mean
daily_returns_std=daily_returns.std()
daily_returns_std
daily_returns_ms=pd.concat([daily_returns_mean, daily_returns_std], axis=1)
daily_returns_ms
random_state = 10
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(daily_returns_ms)
plt.scatter(daily_returns_mean, daily_returns_std, c=y_pred);
plt.axis([-0.01, 0.01, 0, 0.05]);
corr_mat=daily_returns.corr(method='spearman')
corr_mat
Z = hac.linkage(corr_mat, 'single')
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=8., # font size for the x axis labels
)
plt.show()
selected=closes[['AAPL', 'AMZN']]
selected.plot(figsize=(8,6));
daily_returns_sel=calc_daily_returns(selected)
daily_returns_sel.plot(figsize=(8,6));
annual_returns_sel = calc_annual_returns(daily_returns_sel)
annual_returns_sel
def target_func(x, cov_matrix, mean_vector, r):
f = float(-(x.dot(mean_vector) - r) / np.sqrt(x.dot(cov_matrix).dot(x.T)))
return f
def optimal_portfolio(profits, r, allow_short=True):
x = np.ones(len(profits.T))
mean_vector = np.mean(profits)
cov_matrix = np.cov(profits.T)
cons = ({'type': 'eq','fun': lambda x: np.sum(x) - 1})
if not allow_short:
bounds = [(0, None,) for i in range(len(x))]
else:
bounds = None
minimize = optimize.minimize(target_func, x, args=(cov_matrix, mean_vector, r), bounds=bounds,
constraints=cons)
return minimize
opt=optimal_portfolio(annual_returns_sel, 0.015)
opt
annual_returns_sel.dot(opt.x)
asp=calc_annual_returns(calc_daily_returns(sp))
asp
def objfun(W, R, target_ret):
stock_mean = np.mean(R,axis=0)
port_mean = np.dot(W,stock_mean)
cov=np.cov(R.T)
port_var = np.dot(np.dot(W,cov),W.T)
penalty = 2000*abs(port_mean-target_ret)
return np.sqrt(port_var) + penalty
def calc_efficient_frontier(returns):
result_means = []
result_stds = []
result_weights = []
means = returns.mean()
min_mean, max_mean = means.min(), means.max()
nstocks = returns.columns.size
for r in np.linspace(min_mean, max_mean, 150):
weights = np.ones(nstocks)/nstocks
bounds = [(0,1) for i in np.arange(nstocks)]
constraints = ({'type': 'eq', 'fun': lambda W: np.sum(W) - 1})
results = optimize.minimize(objfun, weights, (returns, r), method='SLSQP', constraints = constraints, bounds = bounds)
if not results.success: # handle error
raise Exception(results.message)
result_means.append(np.round(r,4)) # 4 decimal places
std_=np.round(np.std(np.sum(returns*results.x,axis=1)),6)
result_stds.append(std_)
result_weights.append(np.round(results.x, 5))
return {'Means': result_means, 'Stds': result_stds, 'Weights': result_weights}
frontier_data = calc_efficient_frontier(annual_returns_sel)
def plot_efficient_frontier(ef_data):
plt.figure(figsize=(12,8))
plt.title('Efficient Frontier')
plt.xlabel('Standard Deviation of the portfolio (Risk)')
plt.ylabel('Return of the portfolio')
plt.plot(ef_data['Stds'], ef_data['Means'], '--');
plot_efficient_frontier(frontier_data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Using Pandas to download closing price data
Step2: Once the packages are loaded, you need to define the tickers of the stocks to be used, the download source (Yahoo in this case, but Google is also possible) and the dates of interest. With this, the DataReader function from the pandas_datareader package will download the requested prices.
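As a minimal illustration of the same call wrapped by get_historical_closes in the code above (the ticker, source and dates here are just example values):
import pandas_datareader.data as web
web.DataReader('AAPL', 'yahoo', '2011-01-01', '2016-12-31').head()  # daily prices for a single ticker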
Step3: Note
Step4: 3. Asset selection
Step5: 4. Portfolio optimization
|
7,569
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
X, y = load_data()
assert type(X) == np.ndarray
assert type(y) == np.ndarray
pipe = Pipeline([
("scale", StandardScaler()),
("model", SGDClassifier(random_state=42))
])
grid = GridSearchCV(pipe, param_grid={"model__alpha": [1e-3, 1e-2, 1e-1, 1]}, cv=5)
grid.fit(X, y)
coef = grid.best_estimator_.named_steps['model'].coef_
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
7,570
|
<ASSISTANT_TASK:>
Python Code:
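# NOTE: this excerpt assumes the usual imports from an earlier cell of the original notebook,
# roughly the following (exact module paths depend on the Keras version in use):
#   from keras.models import Sequential
#   from keras.layers import Embedding, LSTM, Dropout, TimeDistributed, Dense, Activation
#   from keras.optimizers import Adam
#   from keras.utils.data_utils import get_file
#   import numpy as np
#   from numpy.random import choice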
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read().lower()
print('corpus length:', len(text))
!tail -n 25 {path}
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
chars.insert(0, "\0")
''.join(chars[1:-6])
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
idx = [char_indices[c] for c in text]
idx[:10]
''.join(indices_char[i] for i in idx[:70])
maxlen = 40
sentences = []
next_chars = []
for i in range(0, len(idx) - maxlen+1):
sentences.append(idx[i: i + maxlen])
next_chars.append(idx[i+1: i+maxlen+1])
print('nb sequences:', len(sentences))
sentences = np.concatenate([[np.array(o)] for o in sentences[:-2]])
next_chars = np.concatenate([[np.array(o)] for o in next_chars[:-2]])
sentences.shape, next_chars.shape
n_fac = 24
model=Sequential([
Embedding(vocab_size, n_fac, input_length=maxlen),
LSTM(units=512, input_shape=(n_fac,),return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
implementation=2),
Dropout(0.2),
LSTM(512, return_sequences=True, dropout=0.2, recurrent_dropout=0.2,
implementation=2),
Dropout(0.2),
TimeDistributed(Dense(vocab_size)),
Activation('softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
def print_example():
seed_string="ethics is a basic foundation of all that"
for i in range(320):
x=np.array([char_indices[c] for c in seed_string[-40:]])[np.newaxis,:] # [-40] picks up the last 40 chars
preds = model.predict(x, verbose=0)[0][-1] # [-1] picks up the last char
preds = preds/np.sum(preds)
next_char = choice(chars, p=preds)
seed_string = seed_string + next_char
print(seed_string)
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.optimizer.lr=0.001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.optimizer.lr=0.0001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.save_weights('data/char_rnn.h5')
model.optimizer.lr=0.00001
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
model.fit(sentences, np.expand_dims(next_chars,-1), batch_size=64, epochs=1)
print_example()
print_example()
model.save_weights('data/char_rnn.h5')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preprocess and create model
Step2: Train
|
7,571
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print(text[:1000])
print(words[:80])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
from collections import Counter
import random
## Your code here
threshold = 1e-5
count_words = Counter(int_words)
total_count = len(int_words)
freqs = {word: count/total_count for word, count in count_words.items()}
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in count_words}
train_words = [word for word in int_words if p_drop[word] < random.random()]
print(len(int_words))
print(len(train_words))
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
num_of_words = int(random.random() * window_size + 1)
start_idx = max(0, idx - num_of_words)
end_idx = min(len(words), idx + num_of_words + 1)
result = words[start_idx:idx] + words[idx+1:end_idx]
return result
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name="inputs")
labels = tf.placeholder(tf.int32, [None, None], name="labels")
n_vocab = len(int_to_vocab)
n_embedding = 300
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) # create embedding weight matrix here
embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(n_vocab))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
labels, embed,
n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back again, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0, the next most frequent is 1, and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
7,572
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib as mpl
import plotnine as p9
import matplotlib.pyplot as plt
import itertools
import warnings
warnings.simplefilter("ignore")
from sklearn import neighbors, preprocessing, impute, metrics, model_selection, linear_model, svm, feature_selection
from matplotlib.pyplot import rcParams
rcParams['figure.figsize'] = 6,6
def train_bank_to_xy(bank):
'''standardize and impute training'''
bank_sel = bank[['age','balance','duration','y']].values
X,y = bank_sel[:,:-1], bank_sel[:,-1]
scaler = preprocessing.StandardScaler().fit(X)
imputer = impute.SimpleImputer(fill_value=0).fit(X)
trans_prep = lambda Z: imputer.transform(scaler.transform(Z))
X = trans_prep(X)
y = (y == 'yes')*1
return (X, y), trans_prep
def test_bank_to_xy(bank, trans_prep):
'''standardize and impute test'''
bank_sel = bank[['age','balance','duration','y']].values
X,y = bank_sel[:,:-1], bank_sel[:,-1]
X = trans_prep(X)
y = (y == 'yes')*1
return (X, y)
bank = pd.read_csv('../../data/bank.csv',sep=';',na_values=['unknown',999,'nonexistent'])
bank.info()
bank_tr, bank_te = model_selection.train_test_split(bank,test_size=.33)
p9.ggplot(bank_tr, p9.aes(x = 'age',fill = 'y')) + p9.geom_density(alpha=.2)
(X_tr, y_tr), trans_prep = train_bank_to_xy(bank_tr)
X_te, y_te = test_bank_to_xy(bank_te, trans_prep)
def plot_conf_score(y_te,score,tau):
y_classes = (1,0)
cf_inds = ["Pred {}".format(c) for c in y_classes]
cf_cols = ["True {}".format(c) for c in y_classes]
y_pred = score > tau
return pd.DataFrame(metrics.confusion_matrix(y_pred,y_te,labels=y_classes),index=cf_inds,columns=cf_cols)
score_dur = X_te[:,2]
p9.ggplot(bank_tr[['duration','y']].dropna(axis=0)) + p9.aes(x = 'duration',fill = 'y')\
+ p9.geom_density(alpha=.5)
y_te
plot_conf_score(y_te,score_dur,1.)
plot_conf_score(y_te,score_dur,2.)
## Fit and find NNs
nn = neighbors.NearestNeighbors(n_neighbors=10,metric="l2")
nn.fit(X_tr)
dists, NNs = nn.kneighbors(X_te)
NNs[1], y_tr[NNs[1]].mean(), y_te[1]
score_nn = np.array([(y_tr[knns] == 1).mean() for knns in NNs])
plot_conf_score(y_te,score_nn,.2)
nn = neighbors.KNeighborsClassifier(n_neighbors=10)
nn.fit(X_tr, y_tr)
score_nn = nn.predict_proba(X_te)[:,1]
plot_conf_score(y_te,score_nn,.2)
def print_top_k(score_dur,y_te,k_top):
ordering = np.argsort(score_dur)[::-1]
print("k: score, y")
for k, (yv,s) in enumerate(zip(y_te[ordering],score_dur[ordering])):
print("{}: {}, {}".format(k,s,yv))
if k >= k_top - 1:
break
print_top_k(score_dur,y_te,10)
plt.style.use('ggplot')
fpr_dur, tpr_dur, threshs = metrics.roc_curve(y_te,score_dur)
plt.figure(figsize=(6,6))
plt.plot(fpr_dur,tpr_dur)
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title("ROC for 'duration'")
def plot_temp():
plt.figure(figsize=(6,6))
plt.plot(fpr_dur,tpr_dur,label='duration')
plt.plot(fpr_nn,tpr_nn,label='knn')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.legend()
plt.title("ROC")
fpr_nn, tpr_nn, threshs = metrics.roc_curve(y_te,score_nn)
plot_temp()
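# Sketch: summarize each ROC curve with a single AUC number for easier comparison.
print("AUC duration:", metrics.auc(fpr_dur, tpr_dur))
print("AUC knn:", metrics.auc(fpr_nn, tpr_nn))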
def plot_temp():
plt.figure(figsize=(6,6))
plt.plot(rec_dur,prec_dur,label='duration')
plt.plot(rec_nn,prec_nn,label='knn')
plt.xlabel('recall')
plt.ylabel('precision')
plt.legend()
plt.title("PR curve")
prec_dur, rec_dur, threshs = metrics.precision_recall_curve(y_te,score_dur)
prec_nn, rec_nn, threshs = metrics.precision_recall_curve(y_te,score_nn)
plot_temp()
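# Sketch: average precision gives a single-number summary of each PR curve.
print("AP duration:", metrics.average_precision_score(y_te, score_dur))
print("AP knn:", metrics.average_precision_score(y_te, score_nn))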
from sklearn import discriminant_analysis
## Init previous predictors list
preds = [("Duration",score_dur), ("NN", score_nn)]
## Fit and predict with LDA
lda = discriminant_analysis.LinearDiscriminantAnalysis()
lda.fit(X_tr,y_tr)
score_pred = lda.predict_log_proba(X_te)[:,1]
preds += [("LDA",score_pred)]
## Fit and predict with QDA
qda = discriminant_analysis.QuadraticDiscriminantAnalysis()
qda.fit(X_tr,y_tr)
score_pred = qda.predict_log_proba(X_te)[:,1]
preds += [("QDA",score_pred)]
def plot_pr_models(X_te, y_te, preds):
plt.figure(figsize=(6,6))
for name, score_preds in preds:
prec, rec, threshs = metrics.precision_recall_curve(y_te,score_preds)
plt.plot(rec,prec,label=name)
plt.xlabel('recall')
plt.ylabel('precision')
plt.legend()
plt.title("PR curve")
plot_pr_models(X_te, y_te, preds)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Classification 1
Step3: Evaluating a classifier
Step4: Confusion matrix and classification metrics
Step5: Comments
|
7,573
|
<ASSISTANT_TASK:>
Python Code:
from flexx import flx
%gui asyncio
flx.init_notebook()
class MyComponent(flx.JsComponent):
foo = flx.StringProp('', settable=True)
@flx.reaction('foo')
def on_foo(self, *events):
if self.foo:
window.alert('foo is ' + self.foo + ' (' + len(events) + ' events)')
m = MyComponent()
m.set_foo('helo')
from flexxamples.testers.find_prime import PrimeFinder
p = PrimeFinder()
p.find_prime_py(2000)
p.find_prime_js(2000) # Result is written to JS console, open F12 to see it
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In normal operation, one uses flx.launch() to fire up a browser (or desktop app) to run the JavaScript in. This is followed by flx.run() (or flx.start() for servers) to enter Flexx' main loop.
Step2: Let's use an example model
|
7,574
|
<ASSISTANT_TASK:>
Python Code:
# Assumed imports (not shown in this excerpt): the cells below rely on pandas, numpy,
# matplotlib, itertools.combinations and several scikit-learn helpers. The module paths
# assume an older scikit-learn release where Imputer lives in sklearn.preprocessing.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import combinations
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.linear_model import LinearRegression
df = pd.read_csv('../datasets/UCIrvineCrimeData.csv');
df = df.replace('?',np.NAN)
features = [x for x in df.columns if x not in ['state', 'community', 'communityname', 'county'
, 'ViolentCrimesPerPop']]
df.isnull().sum()
df.dropna()
df.dropna(axis=1);
#only drop rows where all columns are null
df.dropna(how='all');
# drop rows that have not at least 4 non-NaN values
df.dropna(thresh=4);
# only drop rows where NaN appear in specific columns (here :'community')
df.dropna(subset=['community']);
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
imr = imr.fit(df[features])
imputed_data = imr.transform(df[features]);
#df = df.drop(["communityname", "state", "county", "community"], axis=1)
X, y = imputed_data, df['ViolentCrimesPerPop']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0);
class SBS():
def __init__(self, estimator, features,
scoring=r2_score, test_size=0.25,
random_state=1):
self.scoring = scoring
self.estimator = estimator
self.features = features
self.test_size = test_size
self.random_state = random_state
def fit(self, X, y):
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size = self.test_size,
random_state = self.random_state)
dim = X_train.shape[1]
self.indices_ = tuple(range(dim))
self.subsets_ = [self.indices_]
score = self._calc_score(X_train, y_train, X_test, y_test, self.indices_)
self.scores_ = [score]
while dim > self.features:
scores = []
subsets = []
for p in combinations(self.indices_, r=dim-1):
score = self._calc_score(X_train, y_train, X_test, y_test, p)
scores.append(score)
subsets.append(p)
best = np.argmax(scores)
self.indices_ = subsets[best]
self.subsets_.append(self.indices_)
dim -= 1
self.scores_.append(scores[best])
print(self.scores_)
self.k_score_ = self.scores_[-1]
return self
def transform(self, X):
return X[:, self.indices_]
def _calc_score(self, X_train, y_train, X_test, y_test, indices):
self.estimator.fit(X_train[:, indices], y_train)
y_pred = self.estimator.predict(X_test[:, indices])
score = self.scoring(y_test, y_pred)
return score
clf = LinearRegression()
sbs = SBS(clf, features=1)
sbs.fit(X_train, y_train)
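# Sketch (assumption): map the best-scoring subset back to the original column names
# to see which features survive the backward selection.
best_subset = sbs.subsets_[np.argmax(sbs.scores_)]
print([features[i] for i in best_subset])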
k_feat = [len(k) for k in sbs.subsets_]
plt.plot(k_feat, sbs.scores_, marker='o')
plt.ylim([-1, 1])
plt.ylabel('Accuracy')
plt.xlabel('Number of Features')
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Find the number of missing values in every column
Step2: Eliminating samples or features with missing values
Step3: Similarly, we can drop columns that have at least one NaN in any row by setting the axis argument to 1
Step4: The dropna() method supports additional parameters that can come in handy.
Step5: Imputing missing values
Step6: Sklearn fundamentals
Step7: First, we assigned the NumPy array representation of the feature columns to the variable X, and we assigned the target variable to the variable y. Then we used the train_test_split function to randomly split X and y into separate training and test datasets. By setting test_size=0.3 we assigned 30 percent of the samples to X_test and the remaining 70 percent to X_train.
|
7,575
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
import pandas as pd
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
db = sqlite3.connect('L18DB_demo.sqlite')
cursor = db.cursor()
cursor.execute("DROP TABLE IF EXISTS candidates")
cursor.execute("DROP TABLE IF EXISTS contributors")
cursor.execute("PRAGMA foreign_keys=1")
cursor.execute('''CREATE TABLE candidates (
id INTEGER PRIMARY KEY NOT NULL,
first_name TEXT,
last_name TEXT,
middle_init TEXT,
party TEXT NOT NULL)''')
db.commit() # Commit changes to the database
cursor.execute('''CREATE TABLE contributors (
id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
last_name TEXT,
first_name TEXT,
middle_name TEXT,
street_1 TEXT,
street_2 TEXT,
city TEXT,
state TEXT,
zip TEXT,
amount REAL,
date DATETIME,
candidate_id INTEGER NOT NULL,
FOREIGN KEY(candidate_id) REFERENCES candidates(id))''')
db.commit()
with open ("candidates.txt") as candidates:
next(candidates) # jump over the header
for line in candidates.readlines():
cid, first_name, last_name, middle_name, party = line.strip().split('|')
vals_to_insert = (int(cid), first_name, last_name, middle_name, party)
cursor.execute('''INSERT INTO candidates
(id, first_name, last_name, middle_init, party)
VALUES (?, ?, ?, ?, ?)''', vals_to_insert)
with open ("contributors.txt") as contributors:
next(contributors)
for line in contributors.readlines():
cid, last_name, first_name, middle_name, street_1, street_2, \
city, state, zip_code, amount, date, candidate_id = line.strip().split('|')
vals_to_insert = (last_name, first_name, middle_name, street_1, street_2,
city, state, int(zip_code), amount, date, candidate_id)
cursor.execute('''INSERT INTO contributors (last_name, first_name, middle_name,
street_1, street_2, city, state, zip, amount, date, candidate_id)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''', vals_to_insert)
def viz_tables(cols, query):
q = cursor.execute(query).fetchall()
framelist = []
for i, col_name in enumerate(cols):
framelist.append((col_name, [col[i] for col in q]))
return pd.DataFrame.from_items(framelist)
candidate_cols = [col[1] for col in cursor.execute("PRAGMA table_info(candidates)")]
query = '''SELECT * FROM candidates'''
viz_tables(candidate_cols, query)
query = '''SELECT * FROM candidates WHERE middle_init <> ""'''
viz_tables(candidate_cols, query)
print("{} candidates have a middle initial.".format(viz_tables(candidate_cols, query).shape[0]))
query = '''SELECT * FROM candidates ORDER BY id DESC'''
viz_tables(candidate_cols, query)
query = '''SELECT last_name, party FROM candidates'''
viz_tables(['last_name', 'party'], query)
query = '''SELECT DISTINCT party FROM candidates'''
viz_tables(['party'], query)
cursor.execute('''ALTER TABLE candidates ADD COLUMN full_name TEXT''')
candidate_cols = [col[1] for col in cursor.execute("PRAGMA table_info(candidates)")]
viz_tables(candidate_cols, '''SELECT * FROM candidates''')
candidate_cols = [col[1] for col in cursor.execute("PRAGMA table_info(candidates)")] # regenerate columns with full_name
query = '''SELECT id, last_name, first_name FROM candidates''' # Select a few columns
full_name_and_id = [(attr[1] + ", " + attr[2], attr[0]) for attr in cursor.execute(query).fetchall()] # List of tuples: (full_name, id)
update = '''UPDATE candidates SET full_name = ? WHERE id = ?''' # Update the table
for rows in full_name_and_id:
cursor.execute(update, rows)
query = '''SELECT * FROM candidates'''
viz_tables(candidate_cols, query)
update = '''UPDATE candidates SET full_name = "Eventual Winner" WHERE last_name = "Obama"'''
cursor.execute(update)
update = '''UPDATE candidates SET full_name = "Eventual Loser" WHERE last_name = "McCain"'''
cursor.execute(update)
viz_tables(candidate_cols, query)
contributor_cols = [col[1] for col in cursor.execute("PRAGMA table_info(contributors)")] # You've already done this part. I just need to do it here b/c I haven't yet.
function = '''SELECT *, MAX(amount) AS max_amount FROM contributors'''
cursor.execute(function)
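# Sketch (not in the original): a per-candidate aggregate reusing the viz_tables
# helper; the column labels here are purely illustrative.
agg_query = '''SELECT candidate_id, COUNT(*) AS n_contribs, SUM(amount) AS total_amount
               FROM contributors GROUP BY candidate_id'''
viz_tables(['candidate_id', 'n_contribs', 'total_amount'], agg_query)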
query = '''SELECT * FROM candidates LIMIT 3'''
viz_tables(candidate_cols, query)
query = '''SELECT * FROM candidates LIMIT 4 OFFSET 5'''
viz_tables(candidate_cols, query)
query = '''SELECT * FROM candidates ORDER BY last_name LIMIT 4 OFFSET 5'''
viz_tables(candidate_cols, query)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will also use a basic pandas feature to display tables in the database. Although this lecture isn't on pandas, I will still have you use it a little bit.
Step2: Now we create the tables in the database (just like last time).
Step3: <a id='step_1'></a>
Step 1
Step4: <a id='interlude'></a>
Step5: Here's how we can use our helper function. It gives a pretty nice visualization of our table. You should do the same thing with the contributors table.
Step6: <a id='step_2'></a>
Step 2
Step7: We can also see how many entries satisfy the query
Step8: Do the following queries
Step9: Do the following sorts on the contributors table
Step10: Using the DISTINCT clause, you remove duplicate rows.
Step11: Do the following
Step12: What if we want to rename or delete a column? It can't be done in SQLite with a single command. We need to follow some roundabout steps (see SQLite ALTER TABLE). We won't consider this case at the moment.
Step13: Here's another update, this time on an existing column.
Step14: Do the following
Step15: Do the following
|
7,576
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.feature_extraction.text import CountVectorizer
corpus = [
'This is the first document.',
'This is the second second document.',
'And the third one.',
'Is this the first document?',
'The last document?',
]
vect = CountVectorizer()
vect.fit(corpus)
vect.vocabulary_
vect.transform(['This is the second document.']).toarray()
vect.transform(['Something completely new.']).toarray()
vect.transform(corpus).toarray()
vect = CountVectorizer(stop_words=["and", "is", "the", "this"]).fit(corpus)
vect.vocabulary_
vect = CountVectorizer(stop_words="english").fit(corpus)
vect.vocabulary_
vect = CountVectorizer(analyzer="char").fit(corpus) #토큰 1개가 vocaburary로 인식. 원래 기본은 word이지만 char가 들어갈 수 있다.
vect.vocabulary_
import nltk
nltk.download("punkt")
vect = CountVectorizer(tokenizer=nltk.word_tokenize).fit(corpus)
vect.vocabulary_
vect = CountVectorizer(token_pattern="t\w+").fit(corpus)
vect.vocabulary_
vect = CountVectorizer(ngram_range=(2,2)).fit(corpus)
vect.vocabulary_
vect = CountVectorizer(ngram_range=(1,2), token_pattern="t\w+").fit(corpus)
vect.vocabulary_
vect = CountVectorizer(max_df=4, min_df=2).fit(corpus)
vect.vocabulary_, vect.stop_words_
vect.transform(corpus).toarray()
vect.transform(corpus).toarray().sum(axis=0)
from sklearn.feature_extraction.text import TfidfVectorizer
tfidv = TfidfVectorizer().fit(corpus)
tfidv.transform(corpus).toarray()
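# Sketch: pair each vocabulary term with its learned idf weight to see how common
# terms are down-weighted (get_feature_names is the older scikit-learn spelling;
# newer releases use get_feature_names_out).
list(zip(tfidv.get_feature_names(), tfidv.idf_))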
from sklearn.datasets import fetch_20newsgroups
twenty = fetch_20newsgroups()
len(twenty.data)
%time CountVectorizer().fit(twenty.data).transform(twenty.data)
from sklearn.feature_extraction.text import HashingVectorizer
hv = HashingVectorizer(n_features=10)
%time hv.transform(twenty.data)
corpus = ["imaging", "image", "imagination", "imagine", "buys", "buying", "bought"]
vect = CountVectorizer().fit(corpus)
vect.vocabulary_
from sklearn.datasets import fetch_20newsgroups
twenty = fetch_20newsgroups()
docs = twenty.data[:100]
vect = CountVectorizer(stop_words="english", token_pattern="wri\w+").fit(docs)
vect.vocabulary_
from nltk.stem import SnowballStemmer
class StemTokenizer(object):
def __init__(self):
self.s = SnowballStemmer('english')
self.t = CountVectorizer(stop_words="english", token_pattern="wri\w+").build_tokenizer()
def __call__(self, doc):
return [self.s.stem(t) for t in self.t(doc)]
vect = CountVectorizer(tokenizer=StemTokenizer()).fit(docs)
vect.vocabulary_
import json
import string
import urllib2  # needed for the notebook download below
import matplotlib.pyplot as plt  # assumed: plt is used for the bar chart further down
from konlpy.utils import pprint
from konlpy.tag import Hannanum
hannanum = Hannanum()
req = urllib2.Request("https://www.datascienceschool.net/download-notebook/708e711429a646818b9dcbb581e0c10a/")
opener = urllib2.build_opener()
f = opener.open(req)
notebook_json = json.loads(f.read())  # renamed so the json module is not shadowed
cell = ["\n".join(c["source"]) for c in notebook_json["cells"] if c["cell_type"] == u"markdown"]
docs = [w for w in hannanum.nouns(" ".join(cell)) if ((not w[0].isnumeric()) and (w[0] not in string.punctuation))]
vect = CountVectorizer().fit(docs)
count = vect.transform(docs).toarray().sum(axis=0)
plt.bar(range(len(count)), count)
plt.show()
pprint(zip(vect.get_feature_names(), count))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document processing options
Step2: Tokens
Step3: n-grams
Step4: Term frequencies
Step5: TF-IDF
Step6: Hashing Trick
Step7: Using a morphological analyzer
Step8: Example
|
7,577
|
<ASSISTANT_TASK:>
Python Code:
import os
import os.path as osp
import random as rand
from pathlib import Path
import shutil as sh
import warnings
from PIL import Image
warnings.filterwarnings("ignore")
rand.seed(33)
## Inputs and Outputs
input_dir = Path("input")
outputs_dir = Path("_output")
media_dir = input_dir / 'media'
sh.rmtree(outputs_dir)
os.makedirs(outputs_dir, exist_ok=True)
def rescale(image, factor):
'''Helper function to show the images at the right scale'''
new_shape = [int(dim * factor) for dim in image.size]
new_image = image.resize(new_shape, Image.ANTIALIAS)
return new_image
# load all the images
CSN_workflow_longway_im = Image.open(media_dir / "CSN_workflow_longway.png")
walker_history_im = Image.open(media_dir / "walker_history_schematic.png")
sim_manager_dataflow_im = Image.open(media_dir / "sim_manager_dataflow.png")
trad_md_windows_im = Image.open(media_dir / "trad_MD_windows.png")
we_md_windows_im = Image.open(media_dir / "we_MD_windows.png")
continuation_runs_im = Image.open(media_dir / "continuation_runs.png")
contig_tree_im = Image.open(media_dir / "contig_tree.png")
CSN_workflow_shortway_im = Image.open(media_dir / "CSN_workflow_shortway.png")
linking_files_im = Image.open(media_dir / "linking_files.png")
import sys
from copy import copy, deepcopy
import os
import os.path as osp
import pickle
import numpy as np
import scipy.spatial.distance as scidist
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
from openmm_systems.test_systems import LennardJonesPair
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.sim_manager import Manager
from wepy.resampling.distances.distance import Distance
from wepy.resampling.resamplers.wexplore import WExploreResampler
from wepy.walker import Walker
from wepy.runners.openmm import OpenMMRunner, OpenMMState
from wepy.runners.openmm import UNIT_NAMES, GET_STATE_KWARG_DEFAULTS
from wepy.work_mapper.mapper import Mapper
from wepy.boundary_conditions.unbinding import UnbindingBC
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.hdf5 import WepyHDF5
## PARAMETERS
# we use the Reference platform because this is just a test
PLATFORM = 'Reference'
# Langevin Integrator
TEMPERATURE= 300.0*unit.kelvin
FRICTION_COEFFICIENT = 1/unit.picosecond
# step size of time integrations
STEP_SIZE = 0.002*unit.picoseconds
# Resampler parameters
# the maximum weight allowed for a walker
PMAX = 0.5
# the minimum weight allowed for a walker
PMIN = 1e-12
# the maximum number of regions allowed under each parent region
MAX_N_REGIONS = (10, 10, 10, 10)
# the maximum size of regions, new regions will be created if a walker
# is beyond this distance from each voronoi image unless there is an
# already maximal number of regions
MAX_REGION_SIZES = (1, 0.5, .35, .25) # nanometers
# boundary condition parameters
# maximum distance between between any atom of the ligand and any
# other atom of the protein, if the shortest such atom-atom distance
# is larger than this the ligand will be considered unbound and
# restarted in the initial state
CUTOFF_DISTANCE = 1.0 # nm
# reporting parameters
# these are the properties of the states (i.e. from OpenMM) which will
# be saved into the HDF5
SAVE_FIELDS = ('positions', 'box_vectors', 'velocities')
# make a dictionary of units for adding to the HDF5
units = dict(UNIT_NAMES)
## System
# make the test system
test_sys = LennardJonesPair()
## Molecular Topology
mdtraj_topology = mdj.Topology.from_openmm(test_sys.topology)
json_str_top = mdtraj_to_json_topology(mdtraj_topology)
## Runner
# make the integrator
integrator = omm.LangevinIntegrator(TEMPERATURE,
FRICTION_COEFFICIENT,
STEP_SIZE)
# make a context and set the positions
context = omm.Context(test_sys.system,
copy(integrator))
context.setPositions(test_sys.positions)
# get the data from this context so we have a state to start the
# simulation with
get_state_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
init_sim_state = context.getState(**get_state_kwargs)
init_state = OpenMMState(init_sim_state)
# initialize the runner
runner = OpenMMRunner(test_sys.system,
test_sys.topology,
integrator,
platform=PLATFORM)
## Distance Metric
# we define a simple distance metric for this system, assuming the
# positions are in a 'positions' field
class PairDistance(Distance):
def __init__(self, metric=scidist.euclidean):
self.metric = metric
def image(self, state):
return state['positions']
def image_distance(self, image_a, image_b):
dist_a = self.metric(image_a[0], image_a[1])
dist_b = self.metric(image_b[0], image_b[1])
return np.abs(dist_a - dist_b)
# make a distance object which can be used to compute the distance
# between two walkers, for our scorer class
distance = PairDistance()
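# Sketch (not part of the original run): sanity-check the metric on two toy
# two-particle states; the distance is the absolute difference of the pair separations.
_img_a = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
_img_b = np.array([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0]])
print(distance.image_distance(_img_a, _img_b))  # expect ~0.5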
## Resampler
resampler = WExploreResampler(distance=distance,
init_state=init_state,
max_region_sizes=MAX_REGION_SIZES,
max_n_regions=MAX_N_REGIONS,
pmin=PMIN, pmax=PMAX)
## Boundary Conditions
# initialize the unbinding boundary conditions
ubc = UnbindingBC(cutoff_distance=CUTOFF_DISTANCE,
initial_state=init_state,
topology=json_str_top,
ligand_idxs=np.array(test_sys.ligand_indices),
receptor_idxs=np.array(test_sys.receptor_indices))
## Work Mapper
# a simple work mapper
mapper = Mapper()
## initial walkers
n_walkers = 4
init_weight = 1.0 / n_walkers
init_walkers = [Walker(OpenMMState(init_sim_state), init_weight) for i in range(n_walkers)]
## run parameters
n_cycles = 100
n_steps = 1000
# steps for each cycle
steps = [n_steps for i in range(n_cycles)]
run1_hdf5_reporter = WepyHDF5Reporter(
file_path=str(outputs_dir / "results_run1.wepy.h5"),
mode='w',
save_fields=SAVE_FIELDS,
resampler=resampler,
boundary_conditions=ubc,
topology=json_str_top,
units=units)
sim_manager_1 = Manager(deepcopy(init_walkers),
runner=deepcopy(runner),
resampler=deepcopy(resampler),
boundary_conditions=deepcopy(ubc),
work_mapper=deepcopy(mapper),
reporters=[run1_hdf5_reporter]
)
(run1_walkers,
(run1_runner, run1_bc, run1_resampler)) = \
sim_manager_1.run_simulation(n_cycles, steps)
run2_hdf5_reporter = WepyHDF5Reporter(
file_path=str(outputs_dir / "results_run2.wepy.h5"),
mode='w',
save_fields=SAVE_FIELDS,
resampler=resampler,
boundary_conditions=ubc,
topology=json_str_top,
units=units)
# run two simulations from the initial conditions
sim_manager_2 = Manager(deepcopy(init_walkers),
runner=deepcopy(runner),
resampler=deepcopy(resampler),
boundary_conditions=deepcopy(ubc),
work_mapper=deepcopy(mapper),
reporters=[run2_hdf5_reporter]
)
(run2_walkers,
(run2_runner, run2_bc, run2_resampler)) = \
sim_manager_2.run_simulation(n_cycles, steps)
run3_hdf5_reporter = WepyHDF5Reporter(
file_path=str(outputs_dir / "results_run3.wepy.h5"),
mode='w',
save_fields=SAVE_FIELDS,
resampler=run2_resampler,
boundary_conditions=run2_bc,
topology=json_str_top,
units=units)
# run two simulations from the initial conditions
sim_manager_3 = Manager(deepcopy(init_walkers),
runner=deepcopy(run2_runner),
resampler=deepcopy(run2_resampler),
boundary_conditions=deepcopy(run2_bc),
work_mapper=deepcopy(mapper),
reporters=[run3_hdf5_reporter]
)
(run3_walkers,
(run3_runner, run3_bc, run3_resampler)) = \
sim_manager_3.run_simulation(n_cycles, steps)
rescale(CSN_workflow_longway_im, 1.0)
def pair_dist_obs(fields_d, *args, **kwargs):
atomA_coords = fields_d['positions'][:,0]
atomB_coords = fields_d['positions'][:,1]
dists = np.array([np.array([scidist.euclidean(atomA_coords[i],
atomB_coords[i])])
for i in range(atomA_coords.shape[0])
])
return dists
wepy1 = WepyHDF5(outputs_dir / 'results_run1.wepy.h5', mode='r+')
with wepy1:
# compute the observable with the function
# and automatically saving it as an extra trajectory field
obs = wepy1.compute_observable(pair_dist_obs,
['positions'],
(),
save_to_hdf5='pair_dist',
return_results=True
)
print("number of walkers:", len(obs))
print("number of cycles:", obs[0].shape[0])
print("feature vector shape:", obs[0].shape[1:])
with wepy1:
features = np.concatenate([wepy1.get_traj_field(0, i, 'observables/pair_dist')
for i in range(wepy1.num_run_trajs(0))])
print(features.shape)
from sklearn.cluster import MiniBatchKMeans
clf = MiniBatchKMeans(n_clusters=4,
batch_size=10,
random_state=1)
clf.fit(features)
print(clf.labels_.shape)
print(clf.labels_[0:10])
with wepy1:
# destructure the features
obs_trajs = []
start_idx = 0
for traj_idx in range(wepy1.num_run_trajs(0)):
num_traj_frames = wepy1.num_traj_frames(0, traj_idx)
obs_trajs.append(clf.labels_[start_idx : start_idx + num_traj_frames])
start_idx += num_traj_frames
print("observables shape:", len(obs_trajs), len(obs_trajs[0]))
# add it as an observable
wepy1.add_observable('minibatch-kmeans_pair-dist_4_10_1',
[obs_trajs])
from wepy.analysis.parents import (
parent_panel,
net_parent_table,
parent_table_discontinuities,
)
with wepy1:
# make a parent matrix from the hdf5 resampling records for run 0
resampling_panel = wepy1.run_resampling_panel(0)
# get the warping records
warping_records = wepy1.warping_records([0])
parent_panel = parent_panel(
resampler.DECISION,
resampling_panel)
parent_table = net_parent_table(parent_panel)
parent_table_discs = parent_table_discontinuities(
UnbindingBC,
parent_table,
warping_records
)
from wepy.analysis.parents import ancestors
lineage_trace = ancestors(parent_table,
100,
3
)
print(lineage_trace[0:3])
with wepy1:
lineage_fields = wepy1.get_run_trace_fields(0,
lineage_trace[:-1],
['weights',
'observables/pair_dist'])
print("weights:")
print(lineage_fields['weights'][0:3])
print("LJ-pair distance:")
print(lineage_fields['observables/pair_dist'][0:3])
rescale(walker_history_im, 1.0)
rescale(sim_manager_dataflow_im, 0.65)
with wepy1:
mdj_traj = wepy1.run_trace_to_mdtraj(0,
lineage_trace[:-1])
print(mdj_traj)
# save one of the frames as a PDB as a reference topology
mdj_traj[0].save_pdb(str(outputs_dir / 'lj-pair.pdb'))
# save the lineage as a DCD trajectory
mdj_traj.save_dcd(str(outputs_dir / 'lj-pair_walker_lineage.pdb'))
rescale(trad_md_windows_im, 0.6)
rescale(we_md_windows_im, 0.6)
from wepy.analysis.parents import sliding_window
from wepy.analysis.transitions import run_transition_probability_matrix
# use the parent matrix to generate the sliding windows
windows = list(sliding_window(np.array(parent_table_discs), 10))
# make the transition matrix from the windows
with wepy1:
transprob_mat = run_transition_probability_matrix(
wepy1,
0,
"observables/minibatch-kmeans_pair-dist_4_10_1",
windows)
print(transprob_mat)
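# Sketch: each row of the transition probability matrix should sum to roughly 1,
# apart from macrostates that were never observed as a source.
print(np.asarray(transprob_mat).sum(axis=1))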
rescale(CSN_workflow_shortway_im, 1.0)
from wepy.analysis.contig_tree import ContigTree
contigtree = ContigTree(wepy1,
decision_class=resampler.DECISION,
boundary_condition_class=UnbindingBC,
)
resampler.DECISION.ENUM.SQUASH
warp_events = contigtree.warp_trace()
final_walkers = contigtree.final_trace()
squashed_walkers = contigtree.resampling_trace(resampler.DECISION.ENUM.SQUASH)
warp_lineages = contigtree.lineages(squashed_walkers)
print(warp_lineages)
with contigtree:
contig = contigtree.make_contig(contigtree.spanning_contig_traces()[0])
print(contig)
contigtree.sliding_windows(3)[-5:-1]
rescale(continuation_runs_im, 0.8)
rescale(contig_tree_im, 0.85)
print("Runs in this file:", wepy1.run_idxs)
print("Continuations using the API method:\n", wepy1.continuations)
print("Where it is actually stored in the HDF5:\n", wepy1.h5['_settings/continuations'][:])
print("Contig {} has {} frames".format([0], wepy1.contig_n_cycles([0])))
print("Contig {} has {} frames".format([1], wepy1.contig_n_cycles([1])))
print("Contig {} has {} frames".format([2], wepy1.contig_n_cycles([2])))
print("Contig {} has {} frames".format([0,1], wepy1.contig_n_cycles([0,1])))
print("Contig {} has {} frames".format([0,2], wepy1.contig_n_cycles([0,2])))
#wepy1.resampling_records_dataframe([0,1])
spanning_contigs = wepy1.spanning_contigs()
print("The spanning contigs:", spanning_contigs)
from wepy.analysis.network import MacroStateNetwork
random_state_net = MacroStateNetwork(contigtree, transition_lag_time=3,
assg_field_key="observables/rand_assg_idx")
# compute the weights of the macrostates and set them as node attributes
random_state_net.set_nodes_field('Weight', random_state_net.macrostate_weights())
# get the weight of a node
print(random_state_net.graph.node[39]['Weight'])
print(random_state_net.probmat)
from csnanalysis.csn import CSN
from csnanalysis.matrix import *
csn = CSN(random_state_net.countsmat, symmetrize=True)
from scipy.spatial.distance import euclidean
dists = []
for node_id, field_d in random_state_net.iter_nodes_fields(['positions']).items():
# just use the positions of the first frame in the cluster
pos_A, pos_B = field_d['positions'][0][0], field_d['positions'][0][1]
dist = euclidean(pos_A, pos_B)
dists.append(dist)
# the sink basins are those close to the unbinding cutoff
sink_basin = [int(i) for i in np.argwhere(np.array(dists) > 2.5)]
# the source basins are where they are close together
source_basin = [int(i) for i in np.argwhere(np.array(dists) < 0.37)]
print("Number of sink states:", len(sink_basin))
print("Number of source states:", len(source_basin))
committor_probabilities = csn.calc_committors([source_basin, sink_basin])
committor_probabilities[39]
node8_trace = random_state_net.node_assignments(8)
print(node8_trace)
# get an mdtraj trajectory object from the microstates in a node
node8_traj = random_state_net.state_to_mdtraj(8)
node8_traj.superpose(node8_traj)
print("{} frames in macrostate {}".format(node8_traj.n_frames, 8))
import nglview as nv
view = nv.show_mdtraj(node8_traj)
view
rescale(linking_files_im, 1.0)
wepy2 = WepyHDF5(wepy2_path, mode='r')
wepy3 = WepyHDF5(wepy3_path, mode='r')
with wepy2:
print("File 2 runs:", wepy2.run_idxs)
with wepy3:
print("File 3 runs:", wepy3.run_idxs)
# now we are going to link them all under one linker file
linker_h5_path = osp.join(outputs_dir, "all_runs.wepy.h5")
with wepy1:
all_wepy = wepy1.clone(linker_h5_path, mode='w')
with all_wepy:
# link the whole file for wepy1 to preserve the continuations
wepy1_run_idxs = all_wepy.link_file_runs(wepy1_path)
# do the same for wepy2 just to test it on only one run in a file and
# because it is easier
wepy2_run_idxs = all_wepy.link_file_runs(wepy2_path)
# now we need to link the run from the continuation file for wepy2 and
# add in the continuation records
all_wepy.link_run(wepy3_path, 0, continue_run=wepy2_run_idxs[0])
print(all_wepy.spanning_contigs())
all_wepy.open()
wepy1.open()
wepy2.open()
wepy3.open()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to Weighted Ensemble Data Analysis in wepy
Step2: Running the simulation
Step3: First Simulation
Step4: Second Simulation
Step5: Third Simulation
Step6: Analysis Workflow
Step7: Calculating Observables
Step8: When we run compute_observable it both returns the values and saves them into the HDF5 file. Depending on your workflow you may favor one over the other. In the end having the observables saved in the same HDF5 file makes indexing them easier and allows for some fancy queries when you rearrange the data into networks and trees.
Step9: Clustering Simulation Samples
Step10: Then we can choose our classifier hyperparameters and train it
Step11: Once we have the trained classifier and we don't have a test set, we can just use the already determined labels for the feature vectors and then assign this to the simulation dataset as it's own observable. To do this we just need to destructure the feature set back into trajectory shapes and use the add_observable method on our WepyHDF5 dataset
Step12: Thats it for clustering!
Step13: Side Notes on Lineages and Continuous Trajectories
Step14: In this trace the tuples are complex indices where the values are
Step15: Notice that we are excluding the last entry in the trace. Is this an error? It is not, but sometimes the indexes can be confusing. When trying to debug these kinds of issues, it can be very useful to have these two diagrams handy as a reference to the order of events taking place in the simulation.
Step16: In this diagram we show the actual flow of data in the wepy simulation manager and components.
Step17: To explain the wrinkle in the trace indexing above we must consider that the parent table above was generated from all of the resampling records available. Resampling occurs after dynamics is finished, and as the name implies generates no novel samples. And so rather than recording frames twice in the HDF5 before and after sampling we simply record the action of what happens.
Step18: Also note that these trajectories do not include the actual initial structures that started the simulations. These too can be accessed if you need them via the initial_walker_fields and initial_walkers_to_mdtraj methods.
Step19: Now consider a branchpoint in a tree of trajectories. We must be very careful not to double count or undercount transitions.
Step20: The algorithm to achieve this has been implemented in the sliding_window function which generates traces corresponding to the red segments in the above figures.
Step21: These window traces can then be combined with cluster labels (as an observable) to produce a transition probability matrix directly. See the wepy.analysis.transitions module for other similar functions if you want to control how you are aggregating forward and backward transitions.
Step22: Contig Trees
Step23: As you can see, most of those complex intermediary formats are folded into the ContigTree representation. The ContigTree is a combination of a network data structure (networkx.DiGraph) and the WepyHDF5 data.
Step24: What have we done? Well the ContigTree has an extensive API for a lot of stuff so lets try to focus in on a few interesting ones before we get to the transition probabilities.
Step25: And then we can take these events and generate all of the full lineages for them in one line.
Step26: Since we only have a single run our contig tree is really just a single contig which we can get with this little bit of magic
Step27: This Contig object is a subclass of the ContigTree but isn't a tree. This restriction opens a few more doors to us that we will use, but for now it makes the abstraction simpler.
Step28: Multi-Run Data Analysis
Step29: We want to be able to get transitions between the original run and all of its continuations without double counting.
Step30: The Spanning Contigs are the contigs drawn before
Step31: Run 1 continues run 0, and run 2 continues run 0.
Step32: A spanning contig is a contig which goes from a root run (a run that does not continue another run) and a leaf run (a run which is not continued). These can be enumerated
Step33: The Contig Tree Class
Step34: Because this object directly wraps the WepyHDF5 file, we have access to all the data including weights, positions, box sizes etc. and so we can perform all kinds of fancy operations on a macrostate basis.
Step35: Transition Probabilitiy Matrix
Step36: Updated User Workflow
Step37: Committor Probabilities
Step38: Determine the source and sink basins and compute the committor probabilities
Step39: Microstates in the Macrostate
Step40: Visualizing Microstates
Step41: Linking Runs Together
Step42: Code
|
7,578
|
<ASSISTANT_TASK:>
Python Code:
'{} + {} = {}'.format(10, 10, 20)
'{0} + {1} = {2}'.format(10, 10, 20) # esse é o padrão
'{0} + {0} = {1}'.format(10, 20)
'{1} + {0} = {2}'.format(30, 20, 10) # evite fazer isso para não causar confusão
string = '{cidade} é muito bonito(a) durante o(a) {estação}'
string.format(cidade='Bruxelas', estação='Inverno')
'O total é R${0}'.format(59.8912313)
'O total é R${0:.2f}'.format(59.8912313)
'A porcentagem é de {0:.2%}'.format(0.8912313)
'{0:<10} | {1:<10} | {2:<10}'.format('Qtd.', 'Cor', 'Valor') # alinhado à esquerda
'{0:>10} | {1:>10} | {2:>10}'.format('Qtd.', 'Cor', 'Valor') # alinhado à direita
'{0:^6} | {1:^9} | {2:^10}'.format('Qtd.', 'Cor', 'Valor') # centralizado
'{0:+^6} | {1:=^9} | {2:-^10}'.format('Qtd.', 'Cor', 'Valor') # centralizado
formato_tabela = '{0:^6} | {1:^9} | {2:^10}'
formato_tabela
produtos = [
(2, 'Amarelo', 18.50),
(5, 'Verde', 48.50),
(2, 'Azul', 78.50),
]
produtos
print(formato_tabela.format('Qtd.', 'Cor', 'Valor R$'))
for qtd, cor, valor in produtos:
print(formato_tabela.format(qtd, cor, valor))
'{0:e} {0:f} {0:%}'.format(.0000031)
import math
format(math.pi, '6.3f')
format('Python', '.<12')
format('Python', '.>12')
format('Python', '.^12')
l = ['spam', 'spam', 'eggs', 'spam']
l
set(l)
A = set()
len(A)
A = {5, 4, 3, 3, 2, 10}
A
len(A)
sum(A)
max(A)
min(A)
A = {4, 5, 1, 3, 4, 5, 7}
A # ordem diferente da declarada!
A[0]
for num in A:
print(num)
tuple(A)
tuple(A)[0]
list(A)
list(A)[-1]
{letra for letra in 'abrakadabraalakazam'}
{numero for numero in range(30) if numero % 3 != 0}
A = {-10, 0, 10, 20, 30, 40}
A
5 in A
-10 in A
import timeit
tempo = timeit.timeit('[math.exp(x) for x in range(10)]', setup='import math')
tempo
tempo / 1000000
import timeit
import random
# PS: esse código demora para ser executado
vezes = 1000
print('tamanho | tempo da lista | tempo do set | list vs set')
tamanhos = (10 ** i for i in range(3, 8))
for tamanho in tamanhos: # cria um gerador com os valores 10^3, 10^4, ..., 10^7
setup_lista = 'l = list(range({}))'.format(tamanho)
tempo = timeit.timeit('9999999 in l', setup=setup_lista, number=vezes)
media_lista = tempo / vezes
setup_set = 's = set(range({}))'.format(tamanho)
tempo = timeit.timeit('9999999 in s', setup=setup_set, number=vezes)
media_set = tempo / vezes
msg = '{:<9}| {:<15}| {:<13}| set é {:<}x + rápido'
msg = msg.format(tamanho, round(media_lista, 8), round(media_set, 8),
round(media_lista / media_set))
print(msg)
A = {2, 3, 4}
B = {3, 5, 7}
A | B
A.union(B)
A = {2, 3, 4}
B = {3, 5, 7}
A & B
A.intersection(B)
A = {2, 3, 4}
B = {3, 5, 7}
A - B
A = {2, 3, 4}
B = {3, 5, 7}
A.difference(B)
A = {2, 3, 4}
B = {3, 5, 7}
A ^ B
A.symmetric_difference(B)
from faker import Factory
factory = Factory.create('pt_BR') # criando fábrica de dados falsos português brasileiro
nomes = {factory.name() for _ in range(10000)}
nomes
buscas = {'João Silva', 'Ana Ferreira', 'Eduardo Santos', 'Pedro Alves', 'Enzo Correira'}
presentes = [busca for busca in buscas if busca in nomes]
presentes
ausentes = [busca for busca in buscas if busca not in nomes]
ausentes
buscas & nomes
buscas - nomes
# tamanho | tempo set + for | tempo set + & | for vs &
# 100 | 3.945e-05 | 1.86e-06 | & é 21.25x + rápido
# 1000 | 5.844e-05 | 1.751e-05 | & é 3.34x + rápido
# 10000 | 0.00014848 | 5.991e-05 | & é 2.48x + rápido
# 100000 | 0.00015138 | 8.862e-05 | & é 1.71x + rápido
# 1000000 | 0.00014647 | 8.113e-05 | & é 1.81x + rápido
votos = {'joão': 10, 'maria': 25, 'ana': 40, 'pedro': 75}
votos['joão']
votos['joão'] = 11
votos
len(votos)
del votos['joão']
votos
votos['joão']
print(votos.get('joão'))
votos.get('joão', 0)
candidatos = list(votos.keys()) + ['joão', 'muriel', 'marcola']
candidatos
for candidato in candidatos:
print('{} recebeu {} votos.'.format(candidato, votos.get(candidato, 0)))
votos
'ana' in votos
'penélope' in votos
len(votos) # número de items no dict
outros_votos = {'milena': 100, 'mário': 1}
votos.update(outros_votos) # atualiza o dicionário votos com os items de outros_votos
votos
votos.keys()
['maria', 'adelaide'] & votos.keys()
['maria', 'adelaide'] - votos.keys()
votos.values()
votos.items()
[('jean', 50), ('maria', 25)] & votos.items()
[('jean', 50), ('maria', 25)] - votos.items()
for nome in votos.keys():
print(nome)
for qtd_votos in votos.values():
print(qtd_votos)
for nome, qtd_votos in votos.items(): # atribuição múltipla, lembra?
print('{} recebeu {} votos.'.format(nome.capitalize(), qtd_votos))
# não mude esse código, ele que gera os votos para você testar seu programa
from faker import Factory
import random
factory = Factory.create('pt_BR')
# usa distribuição de gauss para gerar quantidade de votos
votos = {factory.name(): abs(round(random.gauss(0, .2) * 10000)) for _ in range(333)}
# deixa nomes completos com somente dois nomes
votos = {nome: votos for nome, votos in votos.items() if len(nome.split()) == 2}
def media(votos):
...
from faker import Factory
factory = Factory.create('pt_BR')
cpfs = {factory.name(): factory.cpf() for _ in range(10)}
cpfs
{numero: numero ** 2 for numero in range(10)}
telefones = {'joão': '9941', 'ana': '9103', 'maria': '9301', 'pedro': '9203'}
telefones
nomes = {}
for nome, telefone in telefones.items():
nomes[telefone] = nome
nomes
{telefone: nome for nome, telefone in telefones.items()}
sorted(telefones.items())
dict(sorted(telefones.items()))
def pega_segundo_elemento(tupla):
return tupla[1]
sorted(telefones.items(), key=pega_segundo_elemento)
pega_segundo_elemento = lambda x: x[1]
sorted(telefones.items(), key=pega_segundo_elemento)
sorted(telefones.items(), key=lambda x: x[1])
from operator import itemgetter
sorted(telefones.items(), key=itemgetter(1))
from collections import defaultdict
def conta_palavras(frase):
contagem = {}
...
return contagem
# rode o código abaixo para testar a corretude de seu programa
assert conta_palavras("quod dolore dolore dolore modi sapiente quod ullam nostrum ullam") == {'ullam': 2, 'sapiente': 1, 'quod': 2, 'nostrum': 1, 'dolore': 3, 'modi': 1}
assert conta_palavras("soluta Soluta sapiente sapiente nostrum Sapiente dolore nostrum modi ullam") == {'ullam': 1, 'sapiente': 3, 'nostrum': 2, 'dolore': 1, 'soluta': 2, 'modi': 1}
assert conta_palavras("quod dolore dolore soluta sapiente sapiente dolore quod sapiente modi") == {'dolore': 3, 'quod': 2, 'soluta': 1, 'modi': 1, 'sapiente': 3}
assert conta_palavras("dolore Dolore quis quod dolore nostrum quod Nostrum sapiente soluta") == {'sapiente': 1, 'quod': 2, 'nostrum': 2, 'soluta': 1, 'dolore': 3, 'quis': 1}
assert conta_palavras("sapiente sapiente quod soluta quis ullam nostrum soluta ullam ullam") == {'ullam': 3, 'sapiente': 2, 'quod': 1, 'nostrum': 1, 'soluta': 2, 'quis': 1}
assert conta_palavras("modi Sapiente dolore Soluta sapiente quis soluta modi dolore ullam") == {'ullam': 1, 'sapiente': 2, 'quis': 1, 'dolore': 2, 'soluta': 2, 'modi': 2}
assert conta_palavras("quis quis nostrum nostrum sapiente quis nostrum quod quis dolore") == {'sapiente': 1, 'quod': 1, 'quis': 4, 'nostrum': 3, 'dolore': 1}
assert conta_palavras("nostrum sapiente quis ullam ullam quod ullam nostrum ullam soluta") == {'ullam': 4, 'sapiente': 1, 'quod': 1, 'nostrum': 2, 'soluta': 1, 'quis': 1}
assert conta_palavras("sapiente ullam quod quis dolore modi Quis quod dolore nostrum") == {'ullam': 1, 'sapiente': 1, 'quod': 2, 'quis': 2, 'nostrum': 1, 'dolore': 2, 'modi': 1}
assert conta_palavras("modi nostrum ullam Quis Soluta modi quis ullam modi ullam") == {'ullam': 3, 'soluta': 1, 'modi': 3, 'nostrum': 1, 'quis': 2}
def comprime_chaves_dict(dicionario):
...
assert comprime_chaves_dict({'molestias': 3950, 'tempore': 'possimus', 'rerum': 1200}) == {'tmpr': 'possimus', 'mlsts': 3950, 'rrm': 1200}
assert comprime_chaves_dict({'nam': 5300, 'minus': 3700, 'fugit': 8600}) == {'mns': 3700, 'nm': 5300, 'fgt': 8600}
assert comprime_chaves_dict({'magnam': 2850, 'quam': 2300, 'asperiores': 7750}) == {'qm': 2300, 'sprrs': 7750, 'mgnm': 2850}
assert comprime_chaves_dict({'quos': 'dignissimos', 'qui': 1700, 'repellendus': 'aut'}) == {'rpllnds': 'aut', 'q': 1700, 'qs': 'dignissimos'}
assert comprime_chaves_dict({'quaerat': 9850, 'magni': 8600, 'blanditiis': 'optio'}) == {'mgn': 8600, 'qrt': 9850, 'blndts': 'optio'}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: However, we can explicitly specify the positions we want to substitute
Step2: We can repeat a single argument
Step3: Provide an arbitrary order
Step4: It is also possible to give names to these fields
Step5: We can also format numbers
Step6: format() also lets us align elements within a 10-character field
Step7: The field width can differ between elements
Step8: It is also possible to change the fill character of the field from a blank to some other character
Step9: We can also store a format and reapply it to different values
Step10: It is also possible to specify the data type
Step11: The possible formats are
Step12: Unlike the str.format() method, the built-in format() function does not allow substitution using braces.
Step13: As we can see, the set syntax - {1}, {1, 2}, etc. - looks exactly like mathematical notation, except that there is no notation for an empty set. If you need to create an empty set, use
Step14: It is worth remembering that sets also behave like sequences, so we can use the functions we learned earlier on them
Step15: An important point to note is that a set does not keep the order of its elements
Step16: That is why it is not possible to access elements by position
Step17: It is possible to access its elements by iterating over the set
Step18: Or by converting it to a tuple or list
Step19: Just like lists, sets also have a shortcut mechanism for building sets: the set comprehension
Step20: Another important characteristic of sets is that they perform membership checks much more efficiently.
Step21: To demonstrate that sets check whether an element belongs to a collection faster, we will use the timeit module, which offers a simple way to measure the execution time of Python code.
Step22: The code above first runs the setup import math and then runs the code [math.exp(x) for x in range(10)], which builds a list with the exponentials of 0 to 9, 1000000 times.
Step23: Knowing that, we can now measure the time of a membership check in a list versus a set
Step24: This code uses some more advanced string-formatting features. To better understand what is being done, check the official documentation on the subject.
Step25: <center>Intersection $${A \cap B}$$</center>
Step26: <center>Difference $${A - B}$$</center>
Step27: <center>Symmetric difference $${A \bigtriangleup B}$$</center>
Step28: Ok, these functions are nice, but I already saw this in high school and never used it in my life; how is this going to be useful to me? (at least that is what I thought when I first saw it)
Step29: Now suppose we have a list of names and want to check whether they are in the set of names. Normally we would do
Step30: But if we use set operations we can do this in a simpler and more efficient way.
Step31: The names that are not present
Step32: Comparing the execution time of the searches using for against searches using intersection, we get the following result
Step33: dict
Step34: As you can see here, dictionaries do not keep the order of their elements. We created the dict with the elements {'joão'
Step35: It is worth noting that trying to access a nonexistent element raises an exception
Step36: Sometimes it may be necessary to avoid this kind of behavior. This can be done using the function
Step37: it is also possible to set a value to be returned when the key is not found
Step38: For example, if you want to tally the votes of an election in which not every candidate received votes and, therefore, does not appear in the votos dictionary
Step39: We can check whether a key exists in the dictionary with in
Step40: To access only the keys of a dictionary we do
Step41: Note that the return value is not a list of keys but a dict_keys. I will not go into detail, but dict_keys - along with dict_values and dict_items, shown further on - behave like sets, so membership checks are very efficient and some set operations are supported
Step42: To access only the values of the dictionary
Step43: Since the values are not unique, dict_values cannot behave like a set, which is why it does not have the set operations
Step44: However, dict.items() does implement the set operations, since the key/value pairs are unique
Step45: Revisiting dictionary iteration
Step46: Exercise
Step47: Just like lists and sets, dictionaries also have an easy way to create dicts
Step48: It turns out that dict comprehension gives us a very pretty way to invert the keys and values of a dictionary
Step49: Normally we would do
Step50: But with a dict comprehension it is much easier
Step51: Sorting a dict by its keys
Step52: It makes no sense to get a dict back, because a dict does not keep order.
Step53: To sort by the values we need to "tell" the sorted() function to use the value as the key. This is done by passing a key argument that receives a function used to pick the value used in the ordering. In this case we need to create a function that returns the second element of our list of (key, value) tuples, i.e. the value.
Step54: Addendum
Step55: Anonymous functions are usually used like this
Step56: However, the most efficient way to perform this kind of operation is to use the operator library, which implements Python's operators efficiently
Step57: The itemgetter() function from the operator library does the same as the pega_segundo_elemento() function, but much faster. To learn more about operator, see its documentation.
Step58: Write a function comprime_chaves_dict() that receives a dict and removes the vowels from the dictionary's keys. For example, the dict {'foo'
|
7,579
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
# path = "data/state/"
path = "data/state/sample/"
from importlib import reload # Python 3
import utils; reload(utils)
from utils import *
from IPython.display import FileLink
batch_size=64
#batch_size=1
%cd data/state
%cd train
%mkdir ../sample
%mkdir ../sample/train
%mkdir ../sample/valid
for d in glob('c?'):
os.mkdir('../sample/train/'+d)
os.mkdir('../sample/valid/'+d)
from shutil import copyfile
g = glob('c?/*.jpg')
shuf = np.random.permutation(g)
for i in range(1500): copyfile(shuf[i], '../sample/train/' + shuf[i])
%cd ../valid
g = glob('c?/*.jpg')
shuf = np.random.permutation(g)
for i in range(1000): copyfile(shuf[i], '../sample/valid/' + shuf[i])
%cd ../../..
%mkdir data/state/results
%mkdir data/state/sample/test
batches = get_batches(path+'train', batch_size=batch_size)
val_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=False)
(val_classes, trn_classes, val_labels, trn_labels, val_filenames, filenames,
test_filename) = get_classes(path)
steps_per_epoch = int(np.ceil(batches.samples/batch_size))
validation_steps = int(np.ceil(val_batches.samples/(batch_size*2)))
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.summary()
10*3*224*224
np.round(model.predict_generator(batches, int(np.ceil(batches.samples/batch_size)))[:10],2)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr=0.001
model.fit_generator(batches, steps_per_epoch, epochs=4, validation_data=val_batches,
validation_steps=validation_steps)
rnd_batches = get_batches(path+'valid', batch_size=batch_size*2, shuffle=True)
val_res = [model.evaluate_generator(rnd_batches, int(np.ceil(rnd_batches.samples/(batch_size*2)))) for i in range(10)]
np.round(val_res, 2)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(10, activation='softmax', kernel_regularizer=l2(0.01))
])
model.compile(Adam(lr=10e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr=0.001
model.fit_generator(batches, steps_per_epoch, epochs=4, validation_data=val_batches,
validation_steps=validation_steps)
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Flatten(),
Dense(100, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr = 0.01
model.fit_generator(batches, steps_per_epoch, epochs=5, validation_data=val_batches,
validation_steps=validation_steps)
def conv1(batches):
model = Sequential([
BatchNormalization(axis=1, input_shape=(3,224,224)),
Conv2D(32,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Conv2D(64,(3,3), activation='relu'),
BatchNormalization(axis=1),
MaxPooling2D((3,3)),
Flatten(),
Dense(200, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(batches, steps_per_epoch, epochs=2, validation_data=val_batches,
validation_steps=validation_steps)
model.optimizer.lr = 0.001
model.fit_generator(batches, steps_per_epoch, epochs=4, validation_data=val_batches,
validation_steps=validation_steps)
return model
conv1(batches)
gen_t = image.ImageDataGenerator(width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(height_shift_range=0.05)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(shear_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(rotation_range=15)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(channel_shift_range=20)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
gen_t = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
batches = get_batches(path+'train', gen_t, batch_size=batch_size)
model = conv1(batches)
model.optimizer.lr = 0.0001
model.fit_generator(batches, steps_per_epoch, epochs=5, validation_data=val_batches,
validation_steps=validation_steps)
model.fit_generator(batches, steps_per_epoch, epochs=25, validation_data=val_batches,
validation_steps=validation_steps)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create sample
Step2: Create batches
Step3: Basic models
Step4: As you can see below, this training is going nowhere...
Step5: Let's first check the number of parameters to see that there's enough parameters to find some useful relationships
Step6: Over 1.5 million parameters - that should be enough. Incidentally, it's worth checking you understand why this is the number of parameters in this layer (a worked count is sketched after these steps)
Step7: Since we have a simple model with no regularization and plenty of parameters, it seems most likely that our learning rate is too high. Perhaps it is jumping to a solution where it predicts one or two classes with high confidence, so that it can give a zero prediction to as many classes as possible - that's the best approach for a model that is no better than random, and that's likely to be where we would end up with a high learning rate. So let's check
Step8: Our hypothesis was correct. It's nearly always predicting class 1 or 6, with very high confidence. So let's try a lower learning rate
Step9: Great - we found our way out of that hole... Now we can increase the learning rate and see where we can get to.
Step10: We're stabilizing at validation accuracy of 0.39. Not great, but a lot better than random. Before moving on, let's check that our validation set on the sample is large enough that it gives consistent results
Step11: Yup, pretty consistent - if we see improvements of 3% or more, it's probably not random, based on the above samples.
Step12: Looks like we can get a bit over 50% accuracy this way. This will be a good benchmark for our future models - if we can't beat 50%, then we're not even beating a linear model trained on a sample, so we'll know that's not a good approach.
Step13: Not looking very encouraging... which isn't surprising since we know that CNNs are a much better choice for computer vision problems. So we'll try one.
Step14: The training set here is very rapidly reaching a very high accuracy. So if we could regularize this, perhaps we could get a reasonable result.
Step15: Height shift
Step16: Random shear angles (max in radians) -
Step17: Rotation
Step18: Channel shift
Step19: And finally, putting it all together!
Step20: At first glance, this isn't looking encouraging, since the validation set is poor and getting worse. But the training set is getting better, and still has a long way to go in accuracy - so we should try annealing our learning rate and running more epochs, before we make a decision.
Step21: Lucky we tried that - we're starting to make progress! Let's keep going.
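A quick worked check of the parameter count discussed in Step6, based on the first model defined above; the exact figure reported by model.summary() may differ slightly depending on how BatchNormalization's own parameters are counted.
inputs = 3 * 224 * 224          # flattened input size: 150,528
dense = inputs * 10 + 10        # Dense(10) weights + biases: 1,505,290
batchnorm = 4 * 3               # gamma/beta and moving mean/var per channel: 12
print(dense + batchnorm)        # roughly 1.5 million parameters in total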
|
7,580
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, fit_dipole
from mne.datasets.brainstorm import bst_phantom_elekta
from mne.io import read_raw_fif
from mayavi import mlab
print(__doc__)
data_path = bst_phantom_elekta.data_path()
raw_fname = op.join(data_path, 'kojak_all_200nAm_pp_no_chpi_no_ms_raw.fif')
raw = read_raw_fif(raw_fname)
events = find_events(raw, 'STI201')
raw.plot(events=events)
raw.info['bads'] = ['MEG2421']
raw.plot_psd(tmax=60., average=False)
raw.fix_mag_coil_types()
raw = mne.preprocessing.maxwell_filter(raw, origin=(0., 0., 0.))
raw.filter(None, 40., fir_design='firwin')
raw.plot(events=events)
tmin, tmax = -0.1, 0.1
event_id = list(range(1, 33))
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, baseline=(None, -0.01),
decim=3, preload=True)
epochs['1'].average().plot(time_unit='s')
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=None)
mne.viz.plot_alignment(raw.info, subject='sample',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain'])
cov = mne.compute_covariance(epochs, tmax=0)
data = []
for ii in event_id:
evoked = epochs[str(ii)].average()
idx_peak = np.argmax(evoked.copy().pick_types(meg='grad').data.std(axis=0))
t_peak = evoked.times[idx_peak]
evoked.crop(t_peak, t_peak)
data.append(evoked.data[:, 0])
evoked = mne.EvokedArray(np.array(data).T, evoked.info, tmin=0.)
del epochs, raw
dip = fit_dipole(evoked, cov, sphere, n_jobs=1)[0]
actual_pos, actual_ori = mne.dipole.get_phantom_dipoles()
actual_amp = 100. # nAm
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(6, 7))
diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1))
print('mean(position error) = %s' % (np.mean(diffs),))
ax1.bar(event_id, diffs)
ax1.set_xlabel('Dipole index')
ax1.set_ylabel('Loc. error (mm)')
angles = np.arccos(np.abs(np.sum(dip.ori * actual_ori, axis=1)))
print('mean(angle error) = %s' % (np.mean(angles),))
ax2.bar(event_id, angles)
ax2.set_xlabel('Dipole index')
ax2.set_ylabel('Angle error (rad)')
amps = actual_amp - dip.amplitude / 1e-9
print('mean(abs amplitude error) = %s' % (np.mean(np.abs(amps)),))
ax3.bar(event_id, amps)
ax3.set_xlabel('Dipole index')
ax3.set_ylabel('Amplitude error (nAm)')
fig.tight_layout()
plt.show()
def plot_pos_ori(pos, ori, color=(0., 0., 0.)):
mlab.points3d(pos[:, 0], pos[:, 1], pos[:, 2], scale_factor=0.005,
color=color)
mlab.quiver3d(pos[:, 0], pos[:, 1], pos[:, 2],
ori[:, 0], ori[:, 1], ori[:, 2],
scale_factor=0.03,
color=color)
mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces=[])
# Plot the position and the orientation of the actual dipole
plot_pos_ori(actual_pos, actual_ori, color=(1., 0., 0.))
# Plot the position and the orientation of the estimated dipole
plot_pos_ori(dip.pos, dip.ori, color=(0., 0., 1.))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The data were collected with an Elekta Neuromag VectorView system at 1000 Hz
Step2: Data channel array consisted of 204 MEG planar gradiometers,
Step3: The data have strong line frequency (60 Hz and harmonics) and cHPI coil
Step4: Let's use Maxwell filtering to clean the data a bit.
Step5: We know our phantom produces sinusoidal bursts below 25 Hz, so let's filter.
Step6: Now we epoch our data, average it, and look at the first dipole response.
Step7: Let's use a sphere head geometry model and let's see the coordinate
Step8: Let's do some dipole fits. We first compute the noise covariance,
Step9: Now we can compare to the actual locations, taking the difference in mm
Step10: Let's plot the positions and the orientations of the actual and the estimated
|
7,581
|
<ASSISTANT_TASK:>
Python Code:
7 **4
s = 'Hi there Sam!'
s.split()
planet = "Earth"
diameter = 12742
print("The diameter of {} is {} kilometers.".format(planet,diameter))
lst = [1,2,[3,4],[5,[100,200,['hello']],23,11],1,7]
lst[3][1][2][0]
d = {'k1':[1,2,3,{'tricky':['oh','man','inception',{'target':[1,2,3,'hello']}]}]}
d['k1'][3]['tricky'][3]['target'][3]
# Tuple is immutable
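# Brief illustration (added note, not in the original notebook): a tuple rejects item
# assignment, while a list can be modified in place.
t = (1, 2, 3)
l = [1, 2, 3]
l[0] = 99            # works: lists are mutable
try:
    t[0] = 99        # raises TypeError: tuples are immutable
except TypeError as err:
    print(err)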
def domainGet(email):
return email.split('@')[-1]
domainGet('user@domain.com')
def findDog(st):
return 'dog' in st.lower().split()
findDog('Is there a dog here?')
def countDog(st):
count = 0
for word in st.lower().split():
if word == 'dog':
count += 1
return count
countDog('This dog runs faster than the other dog dude!')
seq = ['soup','dog','salad','cat','great']
list(filter(lambda word: word[0]=='s',seq))
def caught_speeding(speed, is_birthday):
if is_birthday:
speeding = speed - 5
else:
speeding = speed
if speeding > 80:
return 'Big Ticket'
elif speeding > 60:
return 'Small Ticket'
else:
return 'No Ticket'
caught_speeding(81,True)
caught_speeding(81,False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Split this string
Step2: Given the variables
Step3: Given this nested list, use indexing to grab the word "hello"
Step4: Given this nested dictionary grab the word "hello". Be prepared, this will be annoying/tricky
Step5: What is the main difference between a tuple and a list?
Step6: Create a function that grabs the email website domain from a string in the form
Step7: Create a basic function that returns True if the word 'dog' is contained in the input string. Don't worry about edge cases like a punctuation being attached to the word dog, but do account for capitalization.
Step8: Create a function that counts the number of times the word "dog" occurs in a string. Again ignore edge cases.
Step9: Use lambda expressions and the filter() function to filter out words from a list that don't start with the letter 's'. For example
Step10: Final Problem
|
7,582
|
<ASSISTANT_TASK:>
Python Code:
import gzip
import cPickle
import numpy as np
import theano
import theano.tensor as T
import lasagne
# Load the pickle file for the MNIST dataset.
dataset = 'data/mnist.pkl.gz'
f = gzip.open(dataset, 'rb')
train_set, dev_set, test_set = cPickle.load(f)
f.close()
#train_set contains 2 entries, first the X values, second the Y values
train_x, train_y = train_set
dev_x, dev_y = dev_set
test_x, test_y = test_set
def build_mlp(n_in, n_hidden, n_out, input_var=None):
#Input layer, 1 dimension = number of samples, 2 dimension = input, our 28*28 image
l_in = lasagne.layers.InputLayer(shape=(None, n_in), input_var=input_var)
# Our first hidden layer with n_hidden units
# As nonlinearity we use tanh, you could also try rectify
l_hid1 = lasagne.layers.DenseLayer(incoming=l_in,
num_units=n_hidden, nonlinearity=lasagne.nonlinearities.tanh,
W=lasagne.init.GlorotUniform())
# Our output layer (a softmax layer)
l_out = lasagne.layers.DenseLayer(incoming=l_hid1,
num_units=n_out, nonlinearity=lasagne.nonlinearities.softmax)
return l_out
# Parameters
n_in = 28*28
n_hidden = 50
n_out = 10
# Create the network
x = T.dmatrix('x') # the data, one image per row
y = T.lvector('y') # the labels are presented as 1D vector of [int] labels
network = build_mlp(n_in, n_hidden, n_out, x)
# Create a loss expression for training, i.e., a scalar objective we want
# to minimize (for our multi-class problem, it is the cross-entropy loss):
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, y)
loss = loss.mean()
# Create update expressions for training, i.e., how to modify the
# parameters at each training step. Here, we'll use Stochastic Gradient
# Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.01, momentum=0.9)
# Predict the labels
network_predict_label = T.argmax(lasagne.layers.get_output(network, deterministic=True), axis=1)
# Compile a function performing a training step on a mini-batch (by giving
# the updates dictionary) and returning the corresponding training loss:
train_fn = theano.function(inputs=[x, y], outputs=loss, updates=updates)
# Create the predict_labels function
predict_labels = theano.function(inputs=[x], outputs=network_predict_label)
#Function that helps to iterate over our data in minibatches
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
#Method to compute the accruarcy. Call predict_labels to get the labels for the dataset
def compute_accurarcy(dataset_x, dataset_y):
predictions = predict_labels(dataset_x)
errors = sum(predictions != dataset_y) #Number of errors
accurarcy = 1 - errors/float(len(dataset_y))
return accurarcy
number_of_epochs = 10
print "%d epochs" % number_of_epochs
for epoch in xrange(number_of_epochs):
for batch in iterate_minibatches(train_x, train_y, 20, shuffle=True):
inputs, targets = batch
train_fn(inputs, targets)
accurarcy_dev = compute_accurarcy(dev_x, dev_y)
accurarcy_test = compute_accurarcy(test_x, test_y)
print "%d epoch: Accurarcy on dev: %f, accurarcy on test: %f" % (epoch, accurarcy_dev, accurarcy_test)
print "DONE"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build the MLP
Step2: Create the Train Function
Step3: Train the model
|
7,583
|
<ASSISTANT_TASK:>
Python Code:
%run db2odata.ipynb
%run db2.ipynb
%sql connect reset
%sql connect to sample
%sql -sampledata
%sql SELECT * FROM EMPLOYEE
%odata prompt
%odata DROP TABLE EMPLOYEE
s = %odata -e SELECT lastname, salary from employee where salary > 50000
s = %odata -e SELECT * FROM EMPLOYEE
%odata select * from unknown_table
%sql select * from unknown_table
s = %odata -e -j SELECT * FROM EMPLOYEE LIMIT 1
%odata \
DROP \
DATABASE {settings['database']} \
SCHEMA {settings['schema']} \
TABLE EMPLOYEE
%odata settings
%odata set DATABASE {settings['database']} SCHEMA {settings['schema']}
u = %odata -e select * from employee limit 1
url = %odata -e select * from employee limit 1
print(url)
%odata delete
s = %odata -e SELECT * FROM EMPLOYEE
%odata set maxrows 10
%odata set maxrows -1
%odata select * from employee
%odata set maxrows 10
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT FIRSTNME, LASTNAME FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT COUNT(*) FROM EMPLOYEE
s = %odata -e -r SELECT COUNT(*) FROM EMPLOYEE LIMIT 5
s = %odata -e -r SELECT COUNT(EMPNO) FROM EMPLOYEE LIMIT 5
%sql -q DROP TABLE UNKNOWN_TBL
%odata DROP TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%sql CREATE TABLE UNKNOWN_TBL AS (SELECT * FROM EMPLOYEE) WITH DATA
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata DROP TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata DESCRIBE EMPLOYEE
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000 AND WORKDEPT = 'E21'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND WORKDEPT = 'E21' OR WORKDEPT = 'E11'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND (WORKDEPT = 'E21' OR WORKDEPT = 'E11')
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%AA%'
%sql SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
%%sql -q
DROP TABLE TESTODATA;
CREATE TABLE TESTODATA
(
EMPNO INT NOT NULL,
LASTNAME VARCHAR(10) NOT NULL,
SALARY INT NOT NULL,
BONUS INT
);
%odata DROP TABLE TESTODATA
%odata DESCRIBE TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) VALUES (1,'Fred',10000,1000)
%odata SELECT * FROM TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%sql INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%sql ALTER TABLE TESTODATA ADD CONSTRAINT PKTD PRIMARY KEY (EMPNO)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata DROP TABLE TESTODATA
%odata DESCRIBE TESTODATA
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata -e DELETE FROM TESTODATA WHERE EMPNO=2
%odata -e \
INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) \
VALUES (1,'Fred',10000,1000)
%odata -e UPDATE TESTODATA SET BONUS=2000 WHERE EMPNO=1
%odata SELECT * FROM TESTODATA
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT LASTNAME, DEPTNAME
FROM EMPLOYEE E, DEPARTMENT D
WHERE E.WORKDEPT = D.DEPTNO
)
%odata DROP TABLE EMPDEPT
%odata SELECT LASTNAME, DEPTNAME FROM EMPDEPT LIMIT 5
%%sql
SELECT
COUNT(*)
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE '%SYSTEMS%'
%%sql -q
DROP TABLE PATTERN;
CREATE TABLE PATTERN
(
PATTERN_NUMBER INT NOT NULL PRIMARY KEY,
SEARCH VARCHAR(16)
);
%odata DROP TABLE EMPDEPT
%odata DROP TABLE PATTERN
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT
COUNT(*) AS COUNT
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE
(
SELECT SEARCH FROM PATTERN WHERE PATTERN_NUMBER=1
)
);
%sql INSERT INTO PATTERN VALUES(1,'%SYSTEMS%')
%sql SELECT * FROM EMPDEPT
%odata UPDATE PATTERN SET SEARCH = '%SERVICE%' WHERE PATTERN_NUMBER = 1
%odata SELECT * FROM EMPDEPT
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DB2 Extensions
Step2: A Brief Introduction to OData
Step3: If you connected to the SAMPLE database, you will have the EMPLOYEE and DEPARTMENT tables available to you. However, if you are connecting to a different database, you will need to execute the next command to populate the tables for you. Note, if you run this command and the two tables already exist, the tables will not be replaced. So don't worry if you execute this command by mistake.
Step4: Requesting data from DB2 using the standard %sql (ibm_db) interface is relatively straight-forward. We just need to place the SQL in the command and execute it to get the results.
Step5: Now that we have a working DB2 connection, we will need to set up an OData service to talk to DB2.
Step6: Selecting Data from a Table
Step7: Viewing the OData Command
Step8: The results will show the URL service command used (http
Step9: One drawback of OData is that we don't get the actual error text returned. We know what the error code is, but the message isn't that descriptive. Using the %sql (DB2) command, we can find out that the table doesn't exist.
Step10: Limiting Output Results
Step11: To limit the results from an OData request, you must add the \$top=x modifier at the end of the service request (an illustrative URL is sketched at the end of these steps). The format then becomes
Step12: The last example illustrates two additional features of the %odata command. First, you can span statements over multiple lines by using the backslash character ('\'). You could also use the %%odata command to do this without backslashes, but it unfortunately will not allow for variable substitution. The current settings being used by OData can be found by issuing the SETTINGS command.
Step13: You can also refer to these values by using the settings['name'] variable. So the DROP statement just took the current DATABASE and SCHEMA settings and deleted the definition for the EMPLOYEE table. You could have done this directly with
Step14: And this command will show the connection service being created for us.
Step15: Retrieving URL, OData Command, and Parameters
Step16: You can use this URL to directly access the results through a browser, or any application that can read the results returned by the OData gateway. The print statement below will display the URL as an active link. Click on that to see the results in another browser window.
Step17: When a URL is generated, we need to append the \$format=json tag at the end to tell the OData service and the browser how to handle the results. When we run OData and RESTful calls from a programming language (like Python), we are able to send information in the header which tells the API how to handle the results and parameters. All of the RESTful calls to the OData gateway use the following header information
Step18: SELECT Statements
Step19: You will notice that not all of the rows have been displayed. The output has been limited to 10 lines. 5 lines from the start of the answer set and 5 lines from the bottom of the answer set are displayed. If you want to change the maximum number of rows to be displayed, use the MAXROWS setting.
Step20: If you want an unlimited number of rows returned, set maxrows to -1.
Step21: It is better to limit the results from the answer set by using the LIMIT clause in the SELECT statement. LIMIT will force DB2 to stop retrieving rows after "x" number have been read, while the MAXROWS setting will retrieve all rows and then only display a portion of them. The one advantage of MAXROWS is that you see the bottom 5 rows while you would only be able to do that with DB2 if you could reverse sort the output. The current OData implementation does not have the ability to $orderby, so sorting to reverse the output is not possible.
Step22: Example
Step23: Selecting Columns to Display
Step24: The COUNT(*) function is available as part of a SELECT list and it cannot include any other column names. If you do include other column names they will be ignored.
Step25: One of the unusual behaviors of the COUNT(*) function is that it will actually return the entire answer set under the covers. The %odata command strips the count out from the results and doesn't display the rows returned. That is probably not what you would expect from this syntax! Here is the same example with 5 rows returned and the JSON records.
Step26: One of the recommendations would be not to use the COUNT(*) function to determine the number of rows that will be retrieved, especially if you expect there to be a large number of rows. To minimize the data returned, you can use the form COUNT(column), which will modify the OData request to return the count and ONLY that column in the result set. This is a compromise in terms of the amount of data returned. This example uses the -r (raw) flag, which results in all of the JSON headers and data being displayed. The JSON flag (-j) will not display any records.
Step27: FROM Clause
Step28: This actually can cause some issues if you try to reuse the connection information that was created with the UNKNOWN_TBL. Since the service could not determine the structure of the table, the service will not return any column information with a select statement. The next SQL statement will create the UNKNOWN_TBL.
Step29: Retrying the SELECT statement will result in 43 rows with no columns returned!
Step30: To correct this situation, you need to DROP the connection that the %odata program is using and reissue the SELECT statement.
Step31: Now you can try the SQL statement again.
Step32: Describing the Table Contents
Step33: The datatypes are not the same as what one would expect from a relational database. You get generic information on the character columns (String), and the numbers (Int16, Decimal). The Decimal specification actually contains the number of digits and decimal places but that isn't returned when using the table display.
Step34: Example
Step35: Example
Step36: Example
Step37: Example
Step38: Example
Step39: Converting to OData will mean that the search will look across the entire string, not just the beginning.
Step40: Limit Clause
Step41: INSERT Command
Step42: We also need to remove the connection information from the system in the event we've run this example before.
Step43: A couple of things about the table design. The salary is NOT NULL, while the BONUS allows for nulls. Unfortunately, the DESCRIBE command only tells us about the columns in the table and their OData data type, and gives no indication of whether a column is nullable.
Step44: The initial INSERT will populate the table with valid data. The echo option will show the json document that is sent via the POST command to OData to insert the row.
Step45: Just to make sure things were inserted properly, we retrieve the contents of the table.
Step46: OData (and DB2) will return an error message about our missing SALARY column which requires a value.
Step47: We can try this on the DB2 side as well to get the details of the error.
Step48: DELETE Command
Step49: A primary key is required to issue a DELETE command. You also need to make sure that the primary key column does not contain NULLs because a primary key must always contain a value. The following SQL tries to fix the primary key issue.
Step50: Check to see if we can delete the row yet.
Step51: Adding a primary key after the fact won't help because the service URL would have already recorded the information about the table (and the fact it didn't have a primary key at the time). We need to drop our SERVICE URL and generate another one.
Step52: We do a describe on the table and this will force another service URL to be generated for us.
Step53: Trying the DELETE this time will work.
Step54: Deleting the record again still gives you a successful return code. The call always returns a successful status even if the record doesn't exist.
Step55: UPDATE Command
Step56: At this point we can update their salary.
Step57: We double-check the results to make sure we got it right!
Step58: Views
Step59: We also need to drop any service connection you may have created in the past with this table name.
Step60: Now that we have created the view, we can retrieve rows from it just like a standard table.
Step61: You can also create sophisticated VIEWS that can take parameters to adjust the results returned. For instance, consider the following SQL statement which gives me the count of employees that work in SYSTEMS departments.
Step62: There are two departments with the name SYSTEMS in them, but there is no easy way to create a view for every possible combination of searches that you may want. Instead what we do is create a table that contains the pattern we want to look for and create the view so that it references this table.
Step63: Now we create a view that accesses this PATTERN table to do the actual search. Note that values that are inserted into the PATTERN table must have the SQL special characters like % to make sure patterns can be anywhere in the string.
Step64: In order for our view to work properly, we must populate our PATTERN table with a value. To test the view we will use %SYSTEMS% as our first example.
Step65: And now we can test our view by selecting from it.
Step66: Now that we have it working, we can try exactly the same thing but with OData. Our first transaction will update the search key to SERVICE.
Step67: The next OData statement should select the count of employees working in service departments.
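A hypothetical illustration of the query options mentioned in Step11 and Step17; the host name and service path below are placeholders, not values from this notebook - the real base URL is whatever %odata -e returns for the EMPLOYEE entity.
# Hypothetical service URL; in practice it comes from: url = %odata -e select * from employee
base = "http://myhost:9080/ODataService.svc/EMPLOYEE"   # placeholder host and path
limited = base + "?$top=5&$format=json"                 # first 5 rows, JSON output
print(limited)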
|
7,584
|
<ASSISTANT_TASK:>
Python Code:
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Jean-Remi King <jeanremi.king@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LogisticRegression
import mne
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.decoding import (cross_val_multiscore, LinearModel, SlidingEstimator,
get_coef)
print(__doc__)
data_path = mne.datasets.sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
fname_fwd = meg_path / 'sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = meg_path / 'sample_audvis-ave.fif'
subjects_dir = data_path / 'subjects'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = meg_path / 'sample_audvis-cov.fif'
fname_inv = meg_path / 'sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.8
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(None, 10., fir_design='firwin')
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
snr = 3.0
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
stcs = apply_inverse_epochs(epochs, inverse_operator,
lambda2=1.0 / snr ** 2, verbose=False,
method="dSPM", pick_ori="normal")
# Retrieve source space data into an array
X = np.array([stc.lh_data for stc in stcs]) # only keep left hemisphere
y = epochs.events[:, 2]
# prepare a series of classifier applied at each time sample
clf = make_pipeline(StandardScaler(), # z-score normalization
SelectKBest(f_classif, k=500), # select features for speed
LinearModel(LogisticRegression(C=1, solver='liblinear')))
time_decod = SlidingEstimator(clf, scoring='roc_auc')
# Run cross-validated decoding analyses:
scores = cross_val_multiscore(time_decod, X, y, cv=5, n_jobs=None)
# Plot average decoding scores of 5 splits
fig, ax = plt.subplots(1)
ax.plot(epochs.times, scores.mean(0), label='score')
ax.axhline(.5, color='k', linestyle='--', label='chance')
ax.axvline(0, color='k')
plt.legend()
# The fitting needs not be cross validated because the weights are based on
# the training sets
time_decod.fit(X, y)
# Retrieve patterns after inversing the z-score normalization step:
patterns = get_coef(time_decod, 'patterns_', inverse_transform=True)
stc = stcs[0] # for convenience, lookup parameters from first stc
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(np.abs(patterns), vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep, subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
initial_time=0.1, time_unit='s',
subjects_dir=subjects_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Compute inverse solution
Step3: Decoding in sensor space using a logistic regression
Step4: To investigate weights, we need to retrieve the patterns of a fitted model
|
7,585
|
<ASSISTANT_TASK:>
Python Code:
# Use the chown command to change the ownership of repository to user
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# The OS module in python provides functions for interacting with the operating system
import os
# TODO 1
PROJECT_ID = "cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT_ID
REGION = 'us-central1'
# Store the value of `BUCKET` and `PROJECT_ID` in environment variables.
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["BUCKET"] = BUCKET
# Using `mkdir` we can create an empty directory
!mkdir train
# Using `touch` we can create an empty file
!touch train/__init__.py
%%writefile train/model_definition.py
# Here we'll import data processing libraries like Numpy and Tensorflow
import tensorflow as tf
import numpy as np
# Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_model():
# The `tf.keras.Sequential` method will sequential groups a linear stack of layers into a tf.keras.Model.
model = tf.keras.models.Sequential()
# The `Flatten()` method will flattens the input and it does not affect the batch size.
model.add(tf.keras.layers.Flatten(input_shape=x_train.shape[1:]))
# The `Dense()` method is just your regular densely-connected NN layer.
model.add(tf.keras.layers.Dense(1028))
# The `Activation()` method applies an activation function to an output.
model.add(tf.keras.layers.Activation('relu'))
# The `Dropout()` method applies dropout to the input.
model.add(tf.keras.layers.Dropout(0.5))
# The `Dense()` method is just your regular densely-connected NN layer.
model.add(tf.keras.layers.Dense(512))
# The `Activation()` method applies an activation function to an output.
model.add(tf.keras.layers.Activation('relu'))
# The `Dropout()` method applies dropout to the input.
model.add(tf.keras.layers.Dropout(0.5))
# The `Dense()` method is just your regular densely-connected NN layer.
model.add(tf.keras.layers.Dense(256))
# The `Activation()` method applies an activation function to an output.
model.add(tf.keras.layers.Activation('relu'))
# The `Dropout()` method applies dropout to the input.
model.add(tf.keras.layers.Dropout(0.5))
# The `Dense()` method is just your regular densely-connected NN layer.
model.add(tf.keras.layers.Dense(10))
# The `Activation()` method applies an activation function to an output.
model.add(tf.keras.layers.Activation('softmax'))
return model
# The OS module in python provides functions for interacting with the operating system
import os
# The Python time module provides many ways of representing time in code, such as objects, numbers, and strings.
# It also provides functionality other than representing time, like waiting during code execution and measuring the efficiency of your code.
import time
# Here we'll import data processing libraries like Numpy and Tensorflow
import tensorflow as tf
import numpy as np
from train import model_definition
#Get data
# TODO 2
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_dataset(X, Y, epochs, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
return dataset
ds_train = create_dataset(x_train, y_train, 20, 5000)
ds_test = create_dataset(x_test, y_test, 1, 1000)
model = model_definition.create_model()
model.compile(
# Using `tf.keras.optimizers.Adam` the optimizer will implements the Adam algorithm.
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(
ds_train,
validation_data=ds_test,
verbose=1
)
print("Training time without GPUs locally: {}".format(time.time() - start))
%%writefile train/train_mult_worker_mirrored.py
# The OS module in python provides functions for interacting with the operating system
import os
# The Python time module provides many ways of representing time in code, such as objects, numbers, and strings.
# It also provides functionality other than representing time, like waiting during code execution and measuring the efficiency of your code.
import time
# Here we'll import data processing libraries like Numpy and Tensorflow
import tensorflow as tf
import numpy as np
from . import model_definition
# The `MultiWorkerMirroredStrategy()` method will work as a distribution strategy for synchronous training on multiple workers.
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
#Get data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# add empty color dimension
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
def create_dataset(X, Y, epochs, batch_size):
dataset = tf.data.Dataset.from_tensor_slices((X, Y))
dataset = dataset.repeat(epochs).batch(batch_size, drop_remainder=True)
return dataset
ds_train = create_dataset(x_train, y_train, 20, 5000)
ds_test = create_dataset(x_test, y_test, 1, 1000)
print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
with strategy.scope():
model = model_definition.create_model()
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3, ),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
start = time.time()
model.fit(
ds_train,
validation_data=ds_test,
verbose=2
)
print("Training time with multiple GPUs: {}".format(time.time() - start))
%%writefile config.yaml
# TODO 3a
# Configure a master worker
trainingInput:
scaleTier: CUSTOM
masterType: n1-highcpu-16
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="cpu_only_fashion_minst_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
%%writefile config.yaml
# TODO 3b
# Configure a master worker
trainingInput:
scaleTier: CUSTOM
masterType: n1-highcpu-16
masterConfig:
acceleratorConfig:
count: 2
type: NVIDIA_TESLA_K80
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_2gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
%%writefile config.yaml
# TODO 3c
# Configure a master worker
trainingInput:
scaleTier: CUSTOM
masterType: n1-highcpu-16
masterConfig:
acceleratorConfig:
count: 4
type: NVIDIA_TESLA_K80
%%bash
now=$(date +"%Y%m%d_%H%M%S")
JOB_NAME="multi_gpu_fashion_minst_4gpu_$now"
gcloud ai-platform jobs submit training $JOB_NAME \
--staging-bucket=gs://$BUCKET \
--package-path=train \
--module-name=train.train_mult_worker_mirrored \
--runtime-version=2.3 \
--python-version=3.7 \
--region=us-west1 \
--config config.yaml
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next we will configure our environment. Be sure to change the PROJECT_ID variable in the below cell to your Project ID. This will be the project to which the Cloud AI Platform resources will be billed. We will also create a bucket for our training artifacts (if it does not already exist).
Step2: Since we are going to submit our training job to Cloud AI Platform, we need to create our trainer package. We will create the train directory for our package and create a blank __init__.py file so Python knows that this folder contains a package.
Step3: Next we will create a module containing a function which will create our model. Note that we will be using the Fashion MNIST dataset. Since it's a small dataset, we will simply load it into memory for getting the parameters for our model.
Step4: Before we submit our training jobs to Cloud AI Platform, let's be sure our model runs locally. We will call the model_definition function to create our model and use tf.keras.datasets.fashion_mnist.load_data() to import the Fashion MNIST dataset.
Step5: Train on multiple GPUs/CPUs with MultiWorkerMirrored Strategy
Step6: Lab Task #3
Step7: If we go through the logs, we see that the training job will take around 5-7 minutes to complete. Let's now attach two Nvidia Tesla K80 GPUs and rerun the training job.
Step8: That was a lot faster! The training job will take up to 5-10 minutes to complete. Let's keep going and add more GPUs!
|
7,586
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import csv
import os
import pickle
import skimage
import numpy as np
from sklearn.utils import shuffle
import cv2
#########################
# Initialize constants
#########################
training_file = 'data/train.p'
validation_file='data/train.p'
testing_file = 'data/test.p'
imgs_path = "new_images/"
labels_path = "signnames.csv"
#########################
# Load data
#########################
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
######################################################################################
# flip image from left to right and add them to new data
######################################################################################
def flip(images):
flipped = np.zeros((images.shape[0], 32, 32, 3))
for i in range(0, images.shape[0]):
flipped[i] = cv2.flip(images[i], 1)
    return flipped
X_train_flipped = flip(X_train)
X_valid_flipped = flip(X_valid)
X_test_flipped = flip(X_test)
######################################################################################
# Add
######################################################################################
X_train = np.concatenate((X_train_flipped, X_train), axis=0)
X_test = np.concatenate((X_test_flipped, X_test), axis=0)
X_valid = np.concatenate((X_valid_flipped, X_valid), axis=0)
y_train = np.concatenate((y_train, y_train), axis=0)
y_test = np.concatenate((y_test, y_test), axis=0)
y_valid = np.concatenate((y_valid, y_valid), axis=0)
print(X_train.shape[0])
print(X_test.shape[0])
print(X_valid.shape[0])
######################################################################################
# splitting and shuffling dataset directly in the pandas
######################################################################################
def split(X, y):
msk = np.random.rand(len(X)) < 0.8
X_train = X[msk]
X_valid = X[~msk]
y_train = y[msk]
y_valid = y[~msk]
return X_train, X_valid, y_train, y_valid
X_train, X_valid, y_train, y_valid = split(X_train, y_train)
#Number of training examples
n_train = X_train.shape[0]
#Number of validation examples
n_validation = X_valid.shape[0]
#Number of testing examples.
n_test = X_test.shape[0]
#What's the shape of an traffic sign image?
image_shape = X_train.shape[1], X_train.shape[2]
#Number of unique classes/labels there are in the dataset.
n_classes = y_valid[y_valid.shape[0]-1]+1
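# note (added): this assumes the labels are sorted so the last one is the largest;
# len(np.unique(y_train)) would be a more robust way to count the classes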
#########################
# Print basic stats
#########################
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
import matplotlib.pyplot as plt
import random
%matplotlib inline
#########################
# Print histograms
#########################
# train / test / valid
h1 = plt.hist(y_train, bins='auto', alpha=0.5, color=['green'], label='train')
h2 = plt.hist(y_test, bins='auto', alpha=0.5, color=['blue'], label='test')
h3 = plt.hist(y_valid, bins='auto', alpha=0.5, color=['red'], label='validation')
plt.title("Datasets distribution")
plt.xlabel('Category ID')
plt.ylabel('Number of occurences')
plt.legend(shadow=True, fancybox=True)
plt.grid()
plt.show()
#########################
# Show example image
#########################
print("-------------------------------------------------------------------------")
fig, ax = plt.subplots()
im = plt.imshow(X_train[0][:][:][:], interpolation='nearest')
plt.title('Example image')
ax.axis('off')
plt.show()
#########################
# Show example image
#########################
print("-------------------------------------------------------------------------")
fig, ax = plt.subplots()
im = plt.imshow(X_train[999][:][:][:], interpolation='nearest')
plt.title('Example image')
ax.axis('off')
plt.show()
#########################
# preprocess array of images
# We are going to convert images to grayscale and normalize (-1, +1)
#########################
def preprocess_images(X):
import scipy.misc
import scipy.ndimage
from skimage import data, exposure, img_as_float
R = X[:,:, :, 0]
G = X[:,:, :, 1]
B = X[:, :, :, 2]
# convert to grayscale
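    # (the 299/587/114 weights below are the standard ITU-R BT.601 luma coefficients, scaled by 1000)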
X_preprocessed = R * 299. / 1000 + G * 587. / 1000 + B * 114. / 1000
# normalize
X_preprocessed = ((X_preprocessed - 128)/128).astype(np.float32)
#eq_image = exposure.equalize_adapthist(X_preprocessed[0][:][:][:], 1, clip_limit=0.01)
#plt.imshow(eq_image, cmap='gray')
# change dimensions to fit
    X_preprocessed = np.expand_dims(X_preprocessed, axis=-1)  # append the channel dimension (axis=4 is out of range for a 3-D array)
return X_preprocessed
#########################
# Preprocess all datasets
#########################
X_train_preprocessed = preprocess_images(X_train)
X_valid_preprocessed = preprocess_images(X_valid)
X_test_preprocessed = preprocess_images(X_test)
print("processed")
import tensorflow as tf
from tensorflow.contrib.layers import flatten
BATCH_SIZE = 256
#########################
# Model definition
#########################
def LeNet(x):
mu = 0
sigma = 0.1
# We will create 2 convolution layers
# Layer Convolutional - Input = 32x32x1. Output = 28x28x6.
with tf.name_scope("Convolution_Layer_1"):
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6), name="conv1_b")
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# RELU activation
conv1 = tf.nn.elu(conv1)
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Layer : Convolutional. Output = 10x10x16.
with tf.name_scope("Convolution_Layer_2"):
conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
# RELU activation
conv2 = tf.nn.elu(conv2)
# Pooling. Input = 10x10x16. Output = 5x5x16.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
# Squash image. Input = 5x5x16. Output = 400
fc0 = flatten(conv2)
# We will create 3 fully connected layers
# Input = 400. Output = 120.
with tf.name_scope("Fully_ConnectedLayer_1"):
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
        # Dropout layer - prevent overfitting
fc1 = tf.nn.dropout(fc1, keep_prob)
fc1 = tf.nn.elu(fc1)
# Input = 120. Output = 84.
with tf.name_scope("Fully_Connected_Layer_2"):
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
        # Dropout layer - prevent overfitting
fc2 = tf.nn.dropout(fc2, keep_prob)
fc2 = tf.nn.elu(fc2)
# Input = 84. Output = n_classes (43).
with tf.name_scope("Fully_Connected_Layer_3"):
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(n_classes))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
############################################
# Create graph
############################################
############################################
# Define placeholders
############################################
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
keep_prob = tf.placeholder(tf.float32)
############################################
# Define network and training operations
############################################
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer()
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
with tf.name_scope('summaries'):
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('acc', accuracy_operation)
saver = tf.train.Saver()
summary_op = tf.summary.merge_all()
############################################
# Function evaluating accuracy of the prediction over dataset
############################################
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
# make batches of images and compute accuracy
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
############################################
# Main train model loop
############################################.
EPOCHS = 10000
model_name = './model/lenet'
model_path = './model/'
model_meta_path = './model/lenet.meta'
max_validation_accuracy = 0.0
number_of_epochs_without_improvement = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter("output", sess.graph)
num_examples = len(X_train_preprocessed)
saver = tf.train.import_meta_graph(model_meta_path)
saver.restore(sess, tf.train.latest_checkpoint(model_path))
print("Training...")
for i in range(EPOCHS):
for offset in range(0, n_train, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train_preprocessed[offset:end], y_train[offset:end]
summary = sess.run([training_operation, accuracy_operation], feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
#writer.add_summary(summary, i)
validation_accuracy = evaluate(X_valid_preprocessed, y_valid)
test_accuracy = evaluate(X_test_preprocessed, y_test)
train_accuracy = evaluate(X_train_preprocessed, y_train)
# print basic summary
print("EPOCH {} ...".format(i+1))
print("Validation Set Accuracy = {:.5f}".format(validation_accuracy))
print("Test Set Accuracy = {:.5f}".format(test_accuracy))
print("Train Set Accuracy = {:.5f}".format(train_accuracy))
# If we have best model according the validation accuracy, we will save it to model "lenet"
if (max_validation_accuracy < validation_accuracy):
max_validation_accuracy = validation_accuracy
saver.save(sess, model_name)
print("Model saved")
else:
number_of_epochs_without_improvement = number_of_epochs_without_improvement + 1
if (number_of_epochs_without_improvement >= 50):
print("ending")
break
writer.close()
from os import listdir
from PIL import Image as PImage
############################################
# Load images from the directory - again, just for case we do not want to training loop
############################################
def loadImages(path):
# return array of images
import os, fnmatch
imagesList = fnmatch.filter(os.listdir(path), '*.png')
loadedImages = []
for image in imagesList:
img = PImage.open(path + image)
loadedImages.append(img)
return loadedImages
############################################
# Provide routine for loading labels from csv file
############################################
def loadLabelNames(path):
import csv;
signames = []
with open(path) as csvfile:
csvreader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(csvreader, None)
for row in csvreader:
signames.append(row)
return signames
############################################
# Show new example images
############################################
imgs = loadImages(imgs_path)
signames = loadLabelNames(labels_path)
new_imgs = np.zeros((12, 32, 32, 3))
new_names = ["Speed limit (60km/h)", "Yield", "Speed limit (30km/h)","Children crossing", "Children crossing", "Speed limit (30km/h)", "Stop","Priority road","Yield", "Speed limit (30km/h)", "Go straight or left", "General caution"]
index = 0
for img in imgs:
img = img.resize((32,32), PImage.ANTIALIAS)
im_arr = np.fromstring(img.tobytes(), dtype=np.uint8)
im_arr = im_arr.reshape((1, img.size[1], img.size[0], 3))
plt.imshow(img)
new_imgs[index] = im_arr
plt.show()
index = index + 1
############################################
# We will restore trained model, load all new images and compare prediction with the
# ground truth sign. If sign is correct, we will increase accuracy.
############################################
probabs_i = []
indicies_i = []
index = 0
pred_acc = 0.0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
new_saver = tf.train.import_meta_graph('./model/lenet.meta')
new_saver.restore(sess, tf.train.latest_checkpoint('./model/'))
# your images in an array
imgs = loadImages(imgs_path)
signames = loadLabelNames(labels_path)
plt.figure("images")
for img in imgs:
img = img.resize((32,32), PImage.ANTIALIAS)
im_arr = np.fromstring(img.tobytes(), dtype=np.uint8)
im_arr = im_arr.reshape((1, img.size[1], img.size[0], 3))
# you can show every image
plt.subplot(212)
plt.imshow(img)
plt.show()
im_arr = preprocess_images(im_arr)
predicted = sess.run(logits,feed_dict={x: im_arr, keep_prob: 1})
soft = sess.run(tf.nn.softmax(predicted))
print(soft.shape)
probabs, indicies = sess.run(tf.nn.top_k(soft, k=5))
probabs_i.append(probabs)
indicies_i.append(indicies)
for j in range(0, predicted.shape[0]):
p = signames[np.argmax(soft[j], axis = 0)]
print("Predicted: "+p[1])
print("Ground truth: "+new_names[index])
if new_names[index].lower() == p[1].lower():
print("Correct prediction")
pred_acc += 1.0
else:
                print("Incorrect prediction")
print("------------------------------------------")
index = index + 1
pred_acc = pred_acc / index
print("Final accuracy is: " + str(pred_acc * 100) + "%")
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
print("Top 5 softmax probabs for each image:")
print(len(probabs_i))
for i in range(len(probabs_i)):
print("---------------------------------------- Image number: "+str(i+1)+"-------------------------------")
softmax_arr = probabs_i[i][0][:]
for j in range(softmax_arr.shape[0]):
print("{0:.3f}".format((softmax_arr[j]*100)) + " %")
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, etc. if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
if activation_min != -1 & activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Include an exploratory visualization of the dataset
Step3: Step 2
Step4: Model Architecture
Step5: Train, Validate and Test the Model
Step6: Step 3
Step7: Predict the Sign Type for Each Image
Step8: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step9: Project Writeup
|
7,587
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by this example (not shown in the original cell)
import numpy as np
import scipy as sp
import scipy.stats
import matplotlib.pyplot as plt
import seaborn as sns

mu = 0
std = 1
rv = sp.stats.norm(mu, std)
rv
xx = np.linspace(-5, 5, 100)
plt.plot(xx, rv.pdf(xx))
plt.ylabel("p(x)")
plt.title("pdf of normal distribution")
plt.show()
np.random.seed(0)
x = rv.rvs(100)
x
sns.distplot(x, kde=False, fit=sp.stats.norm)
plt.show()
np.random.seed(0)
x = np.random.randn(100)
plt.figure(figsize=(7,7))
sp.stats.probplot(x, plot=plt)
plt.axis("equal")
plt.show()
np.random.seed(0)
x = np.random.rand(100)
plt.figure(figsize=(7,7))
sp.stats.probplot(x, plot=plt)
plt.ylim(-0.5, 1.5)
plt.show()
xx = np.linspace(-2, 2, 100)
plt.figure(figsize=(6,9))
for i, N in enumerate([1, 2, 10]):
X = np.random.rand(1000, N) - 0.5
S = X.sum(axis=1)/np.sqrt(N)
plt.subplot(3, 2, 2*i+1)
sns.distplot(S, bins=10, kde=False, norm_hist=True)
plt.xlim(-2, 2)
plt.yticks([])
plt.subplot(3, 2, 2*i+2)
sp.stats.probplot(S, plot=plt)
plt.tight_layout()
plt.show()
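# Note: each column of X is Uniform(-0.5, 0.5) with variance 1/12, so S = sum/sqrt(N)
# keeps variance 1/12 (std of about 0.29) for every N while its shape approaches a
# Gaussian, which is what the histograms and Q-Q plots above illustrate.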
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using the pdf method, the probability density function (pdf
Step2: To draw samples by simulation, use the rvs method.
Step3: Q-Q plot
Step4: If you draw a Q-Q plot of a data sample that does not follow a normal distribution, it appears as a curved shape rather than a straight line, as shown below.
Step5: The central limit theorem
|
7,588
|
<ASSISTANT_TASK:>
Python Code:
# Links via http://www.gapminder.org/data/
population_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0XOoBL_n5tAQ&output=xls"
fertility_url = "http://spreadsheets.google.com/pub?key=phAwcNAVuyj0TAlJeCEzcGQ&output=xls"
life_expectancy_url = "http://spreadsheets.google.com/pub?key=tiAiXcrneZrUnnJ9dBU-PAw&output=xls"
def get_data(url):
# Get the data from the url and return only 1964 - 2013
df = pd.read_excel(url, index_col=0)
df = df.unstack().unstack()
df = df[(df.index >= 1964) & (df.index <= 2013)]
df = df.unstack().unstack()
return df
fertility_df = get_data(fertility_url)
life_expectancy_df = get_data(life_expectancy_url)
population_df = get_data(population_url)
fertility_df.to_hdf('fertility_df.hdf', 'df')
life_expectancy_df.to_hdf('life_expectancy_df.hdf', 'df')
population_df.to_hdf('population_df.hdf', 'df')
fertility_df = pd.read_hdf('fertility_df.hdf', 'df')
life_expectancy_df = pd.read_hdf('life_expectancy_df.hdf', 'df')
population_df = pd.read_hdf('population_df.hdf', 'df')
# have common countries across all data
fertility_df = fertility_df.drop(fertility_df.index.difference(life_expectancy_df.index))
population_df = population_df.drop(population_df.index.difference(life_expectancy_df.index))
# get a size value based on population, but don't let it get too small
population_df_size = np.sqrt(population_df/np.pi)/200
min_size = 3
population_df_size = population_df_size.where(population_df_size >= min_size).fillna(min_size)
fertility_df.to_csv('fertility.csv')
population_df.to_csv('population.csv')
life_expectancy_df.to_csv('life_expectancy.csv')
regions_url = "https://docs.google.com/spreadsheets/d/1OxmGUNWeADbPJkQxVPupSOK5MbAECdqThnvyPrwG5Os/pub?gid=1&output=xls"
regions_df = pd.read_excel(regions_url, index_col=0)
regions_df = regions_df.drop(regions_df.index.difference(life_expectancy_df.index))
# Note: Spectral6 is assumed to be imported from bokeh.palettes
regions_df.Group = regions_df.Group.astype('category')
cats = list(regions_df.Group.cat.categories)
def get_color(r):
    return Spectral6[cats.index(r.Group)]
regions_df['region_color'] = regions_df.apply(get_color, axis=1)
# Set up the data.
#
# We make a dictionary of sources that can then be passed to the callback so they are ready for JS object to use.
#
# Dictionary_of_sources is:
# {
# 1962: '_1962',
# 1963: '_1963',
# ....
# }
# We turn this into a string and replace '_1962' with _1962. So the end result is js_source_array:
# '{1962: _1962, 1963: _1963, ....}'
#
# When this is passed into the callback and then accessed at runtime,
# the _1962, _1963 are replaced with the actual source objects that are passed in as args.
sources = {}
years = list(fertility_df.columns)
region_color = regions_df['region_color']
region_color.name = 'region_color'
for year in years:
fertility = fertility_df[year]
fertility.name = 'fertility'
life = life_expectancy_df[year]
life.name = 'life'
population = population_df_size[year]
population.name = 'population'
new_df = pd.concat([fertility, life, population, region_color], axis=1)
sources['_' + str(year)] = ColumnDataSource(new_df)
dictionary_of_sources = dict(zip([x for x in years], ['_%s' % x for x in years]))
js_source_array = str(dictionary_of_sources).replace("'", "")
# Set up the plot
xdr = Range1d(1, 9)
ydr = Range1d(20, 100)
plot = Plot(
x_range=xdr,
y_range=ydr,
title="",
plot_width=800,
plot_height=400,
outline_line_color=None,
toolbar_location=None,
)
AXIS_FORMATS = dict(
minor_tick_in=None,
minor_tick_out=None,
major_tick_in=None,
major_label_text_font_size="10pt",
major_label_text_font_style="normal",
axis_label_text_font_size="10pt",
axis_line_color='#AAAAAA',
major_tick_line_color='#AAAAAA',
major_label_text_color='#666666',
major_tick_line_cap="round",
axis_line_cap="round",
axis_line_width=1,
major_tick_line_width=1,
)
xaxis = LinearAxis(SingleIntervalTicker(interval=1), axis_label="Live births per woman", **AXIS_FORMATS)
yaxis = LinearAxis(SingleIntervalTicker(interval=20), axis_label="Average life expectancy (years)", **AXIS_FORMATS)
plot.add_layout(xaxis, 'below')
plot.add_layout(yaxis, 'left')
# Add the year in background (add before circle)
text_source = ColumnDataSource({'year': ['%s' % years[0]]})
text = Text(x=2, y=35, text='year', text_font_size='150pt', text_color='#EEEEEE')
plot.add_glyph(text_source, text)
# Add the circle
renderer_source = sources['_%s' % years[0]]
circle_glyph = Circle(
x='fertility', y='life', size='population',
fill_color='region_color', fill_alpha=0.8,
line_color='#7c7e71', line_width=0.5, line_alpha=0.5)
circle_renderer = plot.add_glyph(renderer_source, circle_glyph)
# Add the hover (only against the circle and not other plot elements)
tooltips = "@index"
plot.add_tools(HoverTool(tooltips=tooltips, renderers=[circle_renderer]))
# Add the slider
code = """
    var year = slider.get('value'),
        sources = %s,
        new_source_data = sources[year].get('data');
    renderer_source.set('data', new_source_data);
    renderer_source.trigger('change');
    text_source.set('data', {'year': [String(year)]});
    text_source.trigger('change');
""" % js_source_array
callback = Callback(args=sources, code=code)
slider = Slider(start=years[0], end=years[-1], value=1, step=1, title="Year", callback=callback)
callback.args["slider"] = slider
callback.args["renderer_source"] = renderer_source
callback.args["text_source"] = text_source
# Add the legend
text_x = 7
text_y = 95
text_properties = dict(x=text_x, text_font_size='10pt', text_color='#666666')
circle_properties = dict(size=10, line_color=None, fill_alpha=0.8)
for i, region in enumerate(cats):
plot.add_glyph(Text(y=text_y, text=[region], **text_properties))
plot.add_glyph(Circle(x=text_x - 0.1, y=text_y + 2, fill_color=Spectral6[i], **circle_properties))
text_y = text_y - 5
# Stick the plot and the slider together
layout = vplot(plot, hplot(slider))
with open('my_template.html', 'r') as f:
template = Template(f.read())
script, div = components(layout)
html = template.render(
title="Bokeh - Gapminder demo",
plot_script=script,
plot_div=div,
)
display(HTML(html))
with open('gapminder.html', 'w') as f:
f.write(html)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get the Data
Step2: Get the regions and color them
Step4: Build the plot
Step5: Embed in your own template
Step6: To Do
|
7,589
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from pandas import date_range
import bqplot.pyplot as plt
from bqplot import ColorScale
security_1 = np.cumsum(np.random.randn(150)) + 100.0
security_2 = np.cumsum(np.random.randn(150)) + 100.0
fig = plt.figure(title="Security 1")
axes_options = {"x": {"label": "Index"}, "y": {"label": "Price"}}
# x values default to range of values when not specified
line = plt.plot(security_1, axes_options=axes_options)
fig
line.colors = ["DarkOrange"]
# The opacity allows us to display the Line while featuring other Marks that may be on the Figure
line.opacities = [0.5]
line.stroke_width = 2.5
line.fill = "bottom"
line.fill_opacities = [0.2]
line.line_style = "dashed"
line.interpolation = "basis"
line.marker = "triangle-down"
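# Other marker strings accepted here (per the notes below) include "circle", "cross",
# "diamond", "square", "arrow", "rectangle", and "ellipse"; try swapping them in above.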
# Here we define the dates we would like to use
dates = date_range(start="01-01-2007", periods=150)
fig = plt.figure(title="Time Series")
axes_options = {"x": {"label": "Date"}, "y": {"label": "Security 1"}}
time_series = plt.plot(dates, security_1, axes_options=axes_options)
fig
dates_new = date_range(start="06-01-2007", periods=150)
fig = plt.figure()
axes_options = {"x": {"label": "Date"}, "y": {"label": "Price"}}
line = plt.plot(
dates,
[security_1, security_2],
labels=["Security 1", "Security 2"],
axes_options=axes_options,
display_legend=True,
)
fig
line.x, line.y = [dates, dates_new], [security_1, security_2]
fig = plt.figure()
axes_options = {
"x": {"label": "Date"},
"y": {"label": "Security 1"},
"color": {"visible": False},
}
# add a custom color scale to color the lines
plt.scales(scales={"color": ColorScale(colors=["Red", "Green"])})
dates_color = date_range(start="06-01-2007", periods=150)
securities = 100.0 + np.cumsum(np.random.randn(150, 10), axis=0)
# we generate 10 random price series and 10 random positions
positions = np.random.randint(0, 2, size=10)
# We pass the color scale and the color data to the plot method
line = plt.plot(dates_color, securities.T, color=positions, axes_options=axes_options)
fig
line.color = None
fig = plt.figure(animation_duration=1000)
patch = plt.plot(
[],
[],
fill_colors=["orange", "blue", "red"],
fill="inside",
axes_options={"x": {"visible": False}, "y": {"visible": False}},
stroke_width=10,
close_path=True,
display_legend=True,
)
patch.x = [
    [0, 2, 1.2, np.nan, np.nan, np.nan, np.nan],
    [0.5, 2.5, 1.7, np.nan, np.nan, np.nan, np.nan],
    [4, 5, 6, 6, 5, 4, 3],
]
patch.y = [
[0, 0, 1, np.nan, np.nan, np.nan, np.nan],
[0.5, 0.5, -0.5, np.nan, np.nan, np.nan, np.nan],
[1, 1.1, 1.2, 2.3, 2.2, 2.7, 1.0],
]
fig
patch.opacities = [0.1, 0.2]
patch.x = [
[2, 3, 3.2, np.nan, np.nan, np.nan, np.nan],
[0.5, 2.5, 1.7, np.nan, np.nan, np.nan, np.nan],
[4, 5, 6, 6, 5, 4, 3],
]
patch.close_path = False
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Line Chart
Step2: We can explore the different attributes by changing each of them for the plot above
Step3: Similarly, we can change any attribute after the plot has been displayed and the plot will update in place. Run each of the cells below, and try changing the attributes to explore the different features and how they affect the plot.
Step4: To switch to an area chart, set the fill attribute, and control the look with fill_opacities and fill_colors.
Step5: While a Lines plot allows the user to extract the general shape of the data being plotted, there may be a need to visualize discrete data points along with this shape. This is where the marker attribute comes in.
Step6: The marker attribute accepts the values circle, cross, diamond, square, triangle-down, triangle-up, arrow, rectangle, and ellipse. Try changing the string above and re-running the cell to see how each marker type looks.
Step7: Plotting multiples sets of data
Step8: We pass each data set as an element of a list
Step9: Similarly, we can also pass multiple x-values for multiple sets of y-values
Step10: Coloring Lines according to data
Step11: We can also reset the colors of the Line to their defaults by setting the color attribute to None.
Step12: Patches
|
7,590
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import getpass, os
os.environ['PSQL_USER']='dengueadmin'
os.environ['PSQL_HOST']='localhost'
os.environ['PSQL_DB']='dengue'
os.environ['PSQL_PASSWORD']=getpass.getpass("Enter the database password: ")
os.chdir('..')
from infodenguepredict.data.infodengue import get_temperature_data, get_alerta_table, get_tweet_data
%pylab inline
A = get_alerta_table(3304557)#(3303500)
T = get_temperature_data(3304557)#(3303500)
Tw = get_tweet_data(3304557)#(3303500)
A.head()
T = T[~T.index.duplicated()]
T.to_csv('temperature_rio.csv', header=True, sep=',')
T.head()
Tw = Tw[~Tw.index.duplicated()]
Tw.head()
T.resample('W-SUN').mean().tail()
Full = A.join(T.resample('W-SUN').mean()).join(Tw.resample('W-SUN').sum())
Full.head()
Short = Full.dropna()
Short.head()
Short[['casos_est', 'temp_min', 'umid_min', 'numero']].plot(subplots=True, figsize=(15,10),grid=True);
from infodenguepredict.models import sarimax,GAS,GASX
import statsmodels.api as sm
fig, axes = plt.subplots(1, 2, figsize=(15, 4))
fig = sm.graphics.tsa.plot_acf(Full.ix[1:, 'casos'], lags=52, ax=axes[0])
fig = sm.graphics.tsa.plot_pacf(Full.ix[1:, 'casos'], lags=52, ax=axes[1])
# Short.casos = Short.casos.apply(pd.np.log)
model_1 = sarimax.build_model(Full, 'casos', [])
fit_1 = model_1.fit()
fit_1.summary()
def plot_pred(fit):
plt.Figure(figsize=(10,7))
predict = fit.get_prediction(start='2017-01-01', dynamic=False)
predict_ci = predict.conf_int()
Full.casos.plot(style='o',label='obs')
predict.predicted_mean.plot(style='r--', label='In sample')
plt.fill_between(predict_ci.index, predict_ci.ix[:, 0], predict_ci.ix[:, 1], color='r', alpha=0.1)
forecast = fit.get_prediction(start='2017-03-05', end='2017-06-21', dynamic=False)
forecast_ci = forecast.conf_int()
forecast.predicted_mean.plot(style='b--', label='Out of Sample')
plt.fill_between(forecast_ci.index, forecast_ci.ix[:, 0], forecast_ci.ix[:, 1], color='b', alpha=0.1)
plt.legend(loc=0)
plot_pred(fit_1)
model_2 = GAS.build_model(Full, ar=2, sc=6, target='casos')
fit_2 = model_2.fit()
fit_2.summary()
model_2.plot_fit()
plt.savefig('GAS_in_sample.png')
Full.casos.plot(style='ko')
model_2.plot_predict(h=10, past_values=52)
model_2.plot_z(figsize=(15,5))
plt.figure()
ax = plt.gca()
train = Full.loc[Full.index<'2015-01-01']
model_3 = GAS.build_model(train, ar=2, sc=6, target='casos')
fit_3 = model_3.fit()
Full.casos.plot(style='ko', ax=ax, figsize=(15,10))
plt.hold(True)
model_3.plot_predict(h=10, past_values=20, ax=ax, intervals=True, figsize=(15,10))
model_4 = GASX.build_model(Full.dropna(), ar=4, sc=6, formula='casos~1+temp_min')
fit_4 = model_4.fit()
fit_4.summary()
model_4.plot_fit()
model_4.plot_predict(h=10, past_values=15)
rio = get_alerta_table(state='RJ')
rio.head()
for col in ['casos_est_min', 'casos_est_max', 'Localidade_id', 'versao_modelo', 'municipio_nome']:
del rio[col]
rio.head()
riopiv = rio.pivot(index=rio.index, columns='municipio_geocodigo')
riopiv.head()
riopiv['SE'].head()
riopiv.columns = ['{}_{}'.format(*col).strip() for col in riopiv.columns.values]
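# Illustrative result: a multi-index pair such as ('casos', 3304557) becomes the
# single flat column name 'casos_3304557'.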
riopiv.head()
riopiv.shape
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading The Data
Step2: Let's look at the tables
Step3: Let's try to join the tables by date. To align them, we must downsample each one to a weekly time frame
Step4: Note that for the older dates the missing Temperature and Tweet values were replaced by NaN. We can drop those dates and keep a table with no missing data, but we then lose more than two years of data.
Step5: Computing a forecast
Step6: Splitting the dataset for out-of-sample prediction validation
Step7: Looking at state-wide data
Step8: Let's keep only the columns we want to use
Step9: Converting dataframe from long format to wide format
Step10: Now we have a multi-level column index. It may be preferable to flatten it.
|
7,591
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import dir_constants as dc
from tqdm import tqdm_notebook
def find_dupe_dates(group):
return pd.to_datetime(group[group.duplicated('date')]['date'].values)
def merge_dupe_dates(group):
df_chunks = []
dupe_dates = find_dupe_dates(group)
df_chunks.append(group[~group['date'].isin(dupe_dates)])
for date in dupe_dates:
problem_rows = group[group['date'] == date]
ori_index = problem_rows.index
keep_row = problem_rows.iloc[-1].to_dict()
keep_row['outs_princp_beg'] = problem_rows.ix[ori_index[0],column_iloc_map['outs_princp_beg']]
summed = problem_rows.sum()
keep_row['princp_paid'] = summed['princp_paid']
keep_row['int_paid'] = summed['int_paid']
keep_row['fee_paid'] = summed['fee_paid']
keep_row['amt_due'] = summed['amt_due']
keep_row['amt_paid'] = summed['amt_paid']
keep_row['charged_off_amt'] = summed['charged_off_amt']
keep_row['recovs'] = summed['recovs']
keep_row['recov_fees'] = summed['recov_fees']
df_chunks.append(pd.DataFrame(pd.Series(keep_row),columns=[ori_index[-1]]).T)
return pd.concat(df_chunks)
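# Summary of the function above: rows sharing a date are collapsed into the last row
# for that date, with the payment/amount fields summed across the duplicates and
# outs_princp_beg taken from the first duplicate row.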
platform = 'lendingclub'
store = pd.HDFStore(
dc.home_path+'/justin_tinkering/data_science/lendingclub/{0}_store.h5'.
format(platform),
append=True)
pmt_hist_ids = store['pmt_hist_ids'].astype(int)
max_id = pmt_hist_ids.max()
chunksize = 800
n_chunks = len(pmt_hist_ids)//chunksize + 1
# fix loans with double month entries _________________________________________
# left_bound = 0
# right_bound = pmt_hist_ids[chunksize]
already_good_dfs = []
fixed_dfs = []
k = 0
for n in tqdm_notebook(np.arange(n_chunks)):
if n == 0:
left_bound = 0
else:
left_bound = pmt_hist_ids[n*chunksize]
if n == (n_chunks - 1):
right_bound = max_id
else:
right_bound = pmt_hist_ids[(n+1)*chunksize]
chunk = pd.read_hdf(
store,
'pmt_hist_intermediary_1',
where='(loan_id_num > left_bound) & (loan_id_num <= right_bound)')
loans_with_two_entries_in_same_month = chunk[chunk.duplicated(
['loan_id', 'date'])]
dup_date_ids = loans_with_two_entries_in_same_month['loan_id'].unique()
if k == 0:
column_iloc_map = {
col_name: chunk.iloc[-1].index.get_loc(col_name)
for col_name in chunk.columns.values
}
k += 1
id_grouped = chunk.groupby('loan_id')
already_good = chunk[~chunk['loan_id'].isin(dup_date_ids)]
for ids, group in id_grouped:
if ids in dup_date_ids:
fixed_dfs.append(merge_dupe_dates(group))
else:
pass
already_good_dfs.append(already_good)
# Create min_itemsize_dict for allocating size when storing ___________________
min_itemsize_dict = {}
for col in already_good.columns:
if already_good[col].dtype == np.object:
print(col, already_good[col].str.len().max())
if col in ['State', 'VINTAGE', 'grade']:
pass
else:
min_itemsize_dict[col] = 15
col_dtype_map = already_good_dfs[0].dtypes.to_dict()
all_fixed_dfs = pd.concat(fixed_dfs)
for col, dtype in col_dtype_map.items():
all_fixed_dfs[col] = all_fixed_dfs[col].astype(dtype)
k = 0
for chunk in tqdm_notebook([all_fixed_dfs] + already_good_dfs):
if k == 0:
store.append(
'pmt_hist_intermediary_2',
chunk,
data_columns=True,
index=True,
append=False,
min_itemsize=min_itemsize_dict)
k += 1
else:
store.append(
'pmt_hist_intermediary_2',
chunk,
data_columns=True,
index=True,
append=True)
store.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are loans with multiple row entries in the same month (i.e., multiple payments in one month), and there are also loans with no entry at all for some months
Step2: store before next cleaning step
|
7,592
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import numpy.random as random
mean = 3
std = 2
data = random.normal(loc=mean, scale=std, size=50000)
print(len(data))
print(data.mean())
print(data.std())
%matplotlib inline
import matplotlib.pyplot as plt
import scipy.stats as stats
def plot_normal(xs, mean, std, **kwargs):
norm = stats.norm(mean, std)
plt.plot(xs, norm.pdf(xs), **kwargs)
xs = np.linspace(-5, 15, num=200)
plot_normal(xs, mean, std, color='k')
plt.hist(data, bins=200)
plt.show()
plt.hist(data, bins=200, density=True)
plt.show()
plt.hist(data, bins=200, density=True, histtype='step', lw=2)
plt.show()
plt.hist(data, bins=200, density=True, histtype='step', lw=2)
norm = stats.norm(mean, std)
plt.plot(xs, norm.pdf(xs), color='k', lw=2)
plt.show()
kde = stats.gaussian_kde(data)
xs = np.linspace(-5, 15, num=200)
plt.plot(xs, kde(xs))
plt.show()
def f(x):
return 2*x + 12
mean = 1.
std = 1.4
data = random.normal(loc=mean, scale=std, size=50000)
d_t = f(data) # transform data through f(x)
plt.hist(data, bins=200, density=True, histtype='step', lw=2)
plt.hist(d_t, bins=200, density=True, histtype='step', lw=2)
plt.ylim(0, .35)
plt.show()
print('mean = {:.2f}'.format(d_t.mean()))
print('std = {:.2f}'.format(d_t.std()))
def f2(x):
return (np.cos((1.5*x + 2.1))) * np.sin(0.3*x) - 1.6*x
d_t = f2(data)
plt.subplot(121)
plt.hist(d_t, bins=200, density=True, histtype='step', lw=2)
plt.subplot(122)
kde = stats.gaussian_kde(d_t)
xs = np.linspace(-10, 10, 200)
plt.plot(xs, kde(xs), 'k')
plot_normal(xs, d_t.mean(), d_t.std(), color='g', lw=3)
plt.show()
print('mean = {:.2f}'.format(d_t.mean()))
print('std = {:.2f}'.format(d_t.std()))
import sympy
x = sympy.symbols('x')
f = sympy.cos(1.5*x+2.1) * sympy.sin(x/3) - 1.6*x
dfx = sympy.diff(f, x)
dfx
m = dfx.subs(x, mean)
m
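# Linearization sketch (illustrative): the propagated distribution is approximated by
# N(f2(mean), (|m| * std)**2); with mean=1, std=1.4 and |m| of about 1.67 this gives a
# standard deviation of roughly 2.3, which is what the EKF curve plotted below uses.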
plt.hist(d_t, bins=200, density=True, histtype='step', lw=2)
plot_normal(xs, f2(mean), abs(float(m)*std), color='k', lw=3, label='EKF')
plot_normal(xs, d_t.mean(), d_t.std(), color='r', lw=3, label='MC')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As you can see from the print statements, we got 50,000 points with a mean very close to 3 and a standard deviation close to 2.
Step2: But we really want to plot the PDF of the discrete data, not the idealized function.
Step3: that is not very useful to us - we want the PDF, not bin counts. Fortunately hist includes a density parameter which will plot the PDF for us.
Step4: I may not want bars, so I can specify the histtype as 'step' to get a line.
Step5: To be sure it is working, let's also plot the idealized Gaussian in black.
Step6: There is another way to get the approximate distribution of a set of data. There is a technique called kernel density estimate that uses a kernel to estimate the probability distribution of a set of data. SciPy implements it with the function gaussian_kde. Do not be mislead by the name - Gaussian refers to the type of kernel used in the computation. This works for any distribution, not just Gaussians. In this section we have a Gaussian distribution, but soon we will not, and this same function will work.
Step7: Monte Carlo Simulations
Step8: This is what we expected. The input is the Gaussian $\mathcal{N}(\mu=1, \sigma=1.4)$, and the function is $f(x) = 2x+12$. Therefore we expect the mean to be shifted to $f(\mu) = 2*1+12=14$. We can see from the plot and the print statement that this is what happened.
Step9: Here I passed the data through the nonlinear function $f(x) = \cos(1.5x+2.1)\sin(\frac{x}{3}) - 1.6x$. That function is quite close to linear, but we can see how much it alters the pdf of the sampled data.
Step10: We can now compute the slope of the function by evaluating the derivative at the mean.
Step11: The equation of a line is $y=mx+b$, so the new standard deviation should be $~1.67$ times the input std. We can compute the new mean by passing it through the original function because the linearized function is just the slope of f(x) evaluated at the mean. The slope is a tangent that touches the function at $x$, so both will return the same result. So, let's plot this and compare it to the results from the monte carlo simulation.
|
7,593
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import kadro as kd
%matplotlib inline
np.random.seed(42)
n = 20
df = pd.DataFrame({
'a': np.random.randn(n),
'b': np.random.randn(n),
'c': ['foo' if x > 0.5 else 'bar' for x in np.random.rand(n)],
'd': ['fizz' if x > 0.6 else 'bo' for x in np.random.rand(n)]
})
df = df.sort_values(['c', 'd'])
print(df)
kf = kd.Frame(df)
kf
kf.mutate(e = lambda _: _['a'] + _['b']*2)
(kf
.mutate(e = lambda _: _['a'] + _['b']*2,
f = lambda _: np.sqrt(_['e']),
a = lambda _: _['a'] / 2))
(kf
.filter(lambda _: _['a'] > 0,
lambda _: _['b'] > 0))
kf.slice(2, 3, 10)
kf.slice([2, 3, 10])
kf.head(5)
kf.head(5).tail(3)
kf.select('b', 'c')
kf.select(['b', 'c'])
kf
kf.rename({"aa":"a", "bb":"b"})
kf.set_names(["a", "b", "c", "omg_d"])
kf.drop("a", "b")
kf.drop(["a", "b"])
kf.group_by("c", "d")
kf.agg(m_a = lambda _: np.mean(_['a']),
v_b = lambda _: np.var(_['b']),
cov_ab = lambda _: np.cov(_['a'], _['b'])[1,1])
(kf
.group_by("c", "d")
.agg(m_a = lambda _: np.mean(_['a']),
v_b = lambda _: np.var(_['b']),
cov_ab = lambda _: np.cov(_['a'], _['b'])[1,1]))
kf.sort("a")
kf.group_by("c", "d").sort("a")
kf.group_by("a")
kf.group_by("a").ungroup()
kf.sample_n(10)
kf.sample_n(1000, replace=True).sort("a").head(5)
def heavy_func(frame, colname, multy):
frame[colname] = frame[colname] * multy
return frame
(kf
.pipe(heavy_func, colname = "b", multy = 2)
.pipe(heavy_func, colname = "a", multy = 10))
kf.gather('key', 'value')
df_age = pd.DataFrame({
'name': ['vincent', 'tim', 'anna'],
'age': [28, 30, 25]
})
df_length = pd.DataFrame({
'name': ['vincent', 'tim'],
'length': [188, 172]
})
kd_age = kd.Frame(df_age)
kd_length = kd.Frame(df_length)
kd_age.left_join(kd_length)
kd_age.inner_join(kd_length)
kf.plot('a', 'b', kind = 'scatter')
(kf
.mutate(a = lambda _: _['a'] + _['b'])
.filter(lambda _: _['a'] < 1)
.plot('a', 'b', kind = 'scatter', title = 'foobar'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is the data that we'll work with. We won't change the dataframe or its API; rather, we'll wrap it in an object that contains extra methods.
Step2: This new object contains the original pandas dataframe as a child, which means that you can always access the original data via <obj_name>.df. You will always be able to work with pure pandas if need be, but a few methods are added to enhance readability and composability.
Step3: The column is added via a lambda function. This lambda function accepts the original dataframe. This allows you to refer to columns and allows you to apply any function you'd like. You can also create multiple columns in a single mutate statement.
Step4: You might appreciate that this method is somewhat lazy. You can use the 1st new column that you've created when creating your second one without the need to call mutate again.
Step5: Again you should notice a lazy structure. No need to call .filter multiple times as you can apply multiple filters in a single step.
Step6: tibble.head and tibble.tail
Step7: tibble.select
Step8: tibble.rename
Step9: tibble.set_names
Step10: tibble.drop
Step11: tibble.group_by
Step12: tibble.agg
Step13: A few things to note.
Step14: Note that grouping a data structure has an effect on how it is sorted.
Step15: tibble.ungroup
Step16: tibble.sample_n
Step17: tibble.pipe
Step18: tibble.gather
Step19: tibble.spread
Step20: tibble.inner_join
Step21: Plotting
Step22: The nice thing about plotting is that this doesn't break your flow in kadro.
|
7,594
|
<ASSISTANT_TASK:>
Python Code:
from collections import Counter
l = [1,2,2,2,2,3,3,3,1,2,1,12,3,2,32,1,21,1,223,1]
Counter(l)
Counter('aabsbsbsbhshhbbsbs')
s = 'How many times does each word show up in this sentence word times each each word'
words = s.split()
Counter(words)
# Methods with Counter()
c = Counter(words)
c.most_common(2)
sum(c.values()) # total of all counts
c.clear() # reset all counts
list(c) # list unique elements
set(c) # convert to a set
dict(c) # convert to a regular dictionary
c.items() # convert to a list of (elem, cnt) pairs
Counter(dict(list_of_pairs)) # convert from a list of (elem, cnt) pairs
c.most_common()[:-n-1:-1] # n least common elements
c += Counter() # remove zero and negative counts
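# Note: the lines above are reference patterns from the collections.Counter docs;
# `list_of_pairs` and `n` are placeholders rather than variables defined in this
# notebook, so those lines are illustrative and not directly runnable here.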
from collections import defaultdict
d = {}
d['one']
d = defaultdict(object)
d['one']
for item in d:
print item
d = defaultdict(lambda: 0)
d['one']
import collections  # provides OrderedDict, used below

print 'Normal dictionary:'
d = {}
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
d['d'] = 'D'
d['e'] = 'E'
for k, v in d.items():
print k, v
print 'OrderedDict:'
d = collections.OrderedDict()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
d['d'] = 'D'
d['e'] = 'E'
for k, v in d.items():
print k, v
print 'Dictionaries are equal? '
d1 = {}
d1['a'] = 'A'
d1['b'] = 'B'
d2 = {}
d2['b'] = 'B'
d2['a'] = 'A'
print d1 == d2
print 'Dictionaries are equal? '
d1 = collections.OrderedDict()
d1['a'] = 'A'
d1['b'] = 'B'
d2 = collections.OrderedDict()
d2['b'] = 'B'
d2['a'] = 'A'
print d1 == d2
t = (12,13,14)
t[0]
from collections import namedtuple
Dog = namedtuple('Dog','age breed name')
sam = Dog(age=2,breed='Lab',name='Sammy')
frank = Dog(age=2,breed='Shepard',name="Frankie")
sam
sam.age
sam.breed
sam[0]
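# A couple more illustrative accesses (results follow from the definitions above):
frank.name      # -> 'Frankie'
sam._asdict()   # -> the fields as an ordered mapping of name to value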
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Counter() with lists
Step2: Counter with strings
Step3: Counter with words in a sentence
Step4: Common patterns when using the Counter() object
Step5: defaultdict
Step6: Can also initialize with default values
Step7: OrderedDict
Step8: An Ordered Dictionary
Step9: Equality with an Ordered Dictionary
Step10: An Ordered Dictionary
Step11: namedtuple
Step12: For simple use cases, this is usually enough. On the other hand, remembering which index should be used for each value can lead to errors, especially if the tuple has a lot of fields and is constructed far from where it is used. A namedtuple assigns names, as well as the numerical index, to each member.
Step13: We construct the namedtuple by first passing the object type name (Dog) and then passing a string with the variety of fields as a string with spaces between the field names. We can then call on the various attributes
|
7,595
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile('cifar-10-python.tar.gz'):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
'cifar-10-python.tar.gz',
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open('cifar-10-python.tar.gz') as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
# TODO: Implement Function
normOfX = list()
minOfX = np.min(x)
maxOfX = np.max(x)
for elements in x:
normOfX.append((elements - minOfX) / (maxOfX - minOfX))
return np.array(normOfX)
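# Illustrative check (input values assumed): with the global min/max scaling above,
# normalize(np.array([0., 127.5, 255.])) returns approximately [0.0, 0.5, 1.0].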
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
oneHotEncodedVector = np.zeros((len(x),10))
for i,j in enumerate(x):
oneHotEncodedVector[i][j] = 1
return oneHotEncodedVector
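# Illustrative check: one_hot_encode([0, 2]) returns a (2, 10) array with a 1 at
# column 0 of the first row and at column 2 of the second row, zeros elsewhere.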
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
return tf.placeholder(tf.float32,
shape=[None, image_shape[0], image_shape[1], image_shape[2]],
name='x')
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
return tf.placeholder(tf.float32,
shape=[None, n_classes],
name='y')
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
return tf.placeholder(tf.float32,
name='keep_prob')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# TODO: Implement Function
depth = x_tensor.get_shape().as_list()[-1]
padding = 'SAME'
conStrides = [1, *conv_strides, 1]
poolStrides = [1, *pool_strides, 1]
poolKSize = [1, *pool_ksize, 1]
biases = tf.Variable(tf.zeros(conv_num_outputs))
weights = tf.Variable(tf.truncated_normal([*conv_ksize, depth, conv_num_outputs],stddev=0.1))
conv_layer = tf.nn.conv2d(x_tensor, weights, conStrides, padding)
conv_layer = tf.nn.bias_add(conv_layer, biases)
conv_layer = tf.nn.relu(conv_layer)
conv_layer = tf.nn.max_pool(conv_layer, poolKSize,
poolStrides, padding)
return conv_layer
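# Shape note (with the SAME padding used above): a (batch, 32, 32, depth) input with
# conv stride (1, 1) and 2x2 pooling at stride (2, 2) comes out as
# (batch, 16, 16, conv_num_outputs).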
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
return tf.contrib.layers.flatten(x_tensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of outputs that the new tensor should have.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
return tf.contrib.layers.fully_connected(x_tensor, num_outputs)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply an output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of outputs that the new tensor should have.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
return tf.contrib.layers.fully_connected(inputs = x_tensor, num_outputs=num_outputs,activation_fn=None)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that holds the dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
conv_num_outputs = 12
conv_ksize = (3, 3)
conv_strides = (1, 1)
pool_ksize = (2, 2)
pool_strides = (2, 2)
layer1 = conv2d_maxpool(x, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
layer2 = conv2d_maxpool(layer1, conv_num_outputs * 2, conv_ksize, conv_strides, pool_ksize, pool_strides)
layer3 = conv2d_maxpool(layer2, conv_num_outputs * 4, conv_ksize, conv_strides, pool_ksize, pool_strides)
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
flatten_layer3 = flatten(layer3)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
fully_connected_layer1 = fully_conn(flatten_layer3, 576)
fully_connected_layer1 = tf.nn.dropout(fully_connected_layer1, keep_prob)
fully_connected_layer2 = fully_conn(fully_connected_layer1, 384)
fully_connected_layer2 = tf.nn.dropout(fully_connected_layer2, keep_prob)
fully_connected_layer3 = fully_conn(fully_connected_layer2, 192)
fully_connected_layer3 = tf.nn.dropout(fully_connected_layer3, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
output_layer = output(fully_connected_layer3, 10)
# TODO: return output
return output_layer
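# Shape sketch for the settings above: 32x32x3 -> 16x16x12 -> 8x8x24 -> 4x4x48,
# so the flattened layer has 4*4*48 = 768 features feeding the 576/384/192 dense layers.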
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
session.run(optimizer, {x: feature_batch, y: label_batch, keep_prob: keep_probability})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
train_loss = session.run(cost, {x: feature_batch, y: label_batch, keep_prob: 1.})
valid_loss = session.run(cost, {x: valid_features, y: valid_labels, keep_prob: 1.})
valid_acc = session.run(accuracy, {x: valid_features, y: valid_labels, keep_prob: 1.})
print('Train Loss: {:>10.6f}, Validation Loss: {:>10.6f}, Validation Accuracy: {:.6f}'
.format(train_loss, valid_loss, valid_acc))
# TODO: Tune Parameters
epochs = 20
batch_size = 256
keep_probability = 0.5
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_training.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for train_feature_batch, train_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: train_feature_batch, loaded_y: train_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
7,596
|
<ASSISTANT_TASK:>
Python Code:
import nengo
import numpy as np
import cPickle
import matplotlib.pyplot as plt
from matplotlib import pylab
import matplotlib.animation as animation
from scipy import linalg
%matplotlib inline
import scipy.ndimage
#Weight matrices generated by the neural network after training
#Maps the label vectors to the neuron activity of the ensemble
label_weights = cPickle.load(open("label_weights5000.p", "rb"))
#Maps the activity of the neurons to the visual representation of the image
activity_to_img_weights = cPickle.load(open("activity_to_img_weights5000.p", "rb"))
#Maps the activity of the neurons of an image with the activity of the neurons of an image rotated 6 degrees
rotation_weights = cPickle.load(open("rotation_weights5000.p", "rb"))
#Create the pointers for the numbers
temp = np.diag([1]*10)
ZERO = temp[0]
ONE = temp[1]
TWO = temp[2]
THREE= temp[3]
FOUR = temp[4]
FIVE = temp[5]
SIX = temp[6]
SEVEN =temp[7]
EIGHT= temp[8]
NINE = temp[9]
labels =[ZERO,ONE,TWO,THREE,FOUR,FIVE,SIX,SEVEN,EIGHT,NINE]
#Visualize the one hot representation
print(ZERO)
print(ONE)
def intense(img):
newImg = img.copy()
#for i in range(len(newImg)):
# newImg[i] = np.log (newImg[i] + 1.25)
newImg[newImg < 0] = -1
newImg[newImg > 0] = 1
return newImg
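# Note: intense() simply thresholds the image at zero, mapping negative pixel values
# to -1 and positive values to 1 (the commented-out log transform is an earlier alternative).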
#Change this to imagine different digits
imagine = FIVE
#Can also imagine combitnations of numbers (ZERO + ONE)
#Label to activity
test_activity = np.dot(imagine,label_weights)
#Image decoded
test_output_img = np.dot(test_activity, activity_to_img_weights)
#noise = np.random.random([28,28])
#test_output_img = noise+np.reshape(test_output_img,(28,28))
#clean = intense(test_output_img)
#clean = scipy.ndimage.median_filter(test_output_img, 3)
#clean = intense(clean)
clean = scipy.ndimage.gaussian_filter(test_output_img, sigma=1)
#clean = intense(clean)
#clean = scipy.ndimage.binary_opening(test_output_img)
#Edge detection?
#clean = scipy.ndimage.sobel(test_output_img, axis=0, mode='constant')
#Sharpening
#filter_blurred_f = scipy.ndimage.gaussian_filter(test_output_img, 1)
#alpha = 30
#clean = test_output_img + alpha * (test_output_img - filter_blurred_f)
plt.subplot(131)
plt.imshow(test_output_img.reshape(28,28),cmap='gray')
plt.subplot(132)
plt.imshow(clean.reshape(28,28),cmap='gray')
clean = intense(clean)
plt.subplot(133)
plt.imshow(clean.reshape(28,28),cmap='gray')
plt.show()
for i in range(7):
imagine = labels[i]
#Label to activity
test_activity = np.dot(imagine,label_weights)
#Image decoded
test_output_img = np.dot(test_activity, activity_to_img_weights)
noise = np.random.random([28,28])
test_output_img = noise+np.reshape(test_output_img,(28,28))
plt.subplot(131)
plt.imshow(test_output_img.reshape(28,28),cmap='gray')
clean = scipy.ndimage.gaussian_filter(test_output_img, sigma=1)
plt.subplot(132)
plt.imshow(clean.reshape(28,28),cmap='gray')
clean = intense(clean)
plt.subplot(133)
plt.imshow(clean.reshape(28,28),cmap='gray')
plt.show()
#Change this to visualize different digits
imagine = FIVE
#How long the animation should go for
frames=60
#Make a list of the activation of rotated images and add first frame
rot_seq = []
rot_seq.append(np.dot(imagine,label_weights)) #Map the label vector to the activity vector
test_output_img = np.dot(rot_seq[0], activity_to_img_weights) #Map the activity to the visual representation
#add the rest of the frames, using the previous frame to calculate the current frame
for i in range(1,frames):
rot_seq.append(np.dot(rot_seq[i-1],rotation_weights)) #add the activity of the current image to the list
test_output_img = np.dot(rot_seq[i], activity_to_img_weights) #map the new activity to the visual image
#Animation of rotation
fig = plt.figure()
def updatefig(i):
image_vector = np.dot(rot_seq[i], activity_to_img_weights) #map the activity to the image representation
im = pylab.imshow(np.reshape(image_vector,(28,28), 'F').T, cmap=plt.get_cmap('Greys_r'),animated=True)
return im,
ani = animation.FuncAnimation(fig, updatefig, interval=50, blit=True)
plt.show()
imagine = FIVE
test_output_img = np.dot(imagine,label_weights) #Map the label vector to the activity vector
test_output_img = np.dot(test_output_img,rotation_weights)
test_output_img = np.dot(test_output_img,linalg.inv(rotation_weights))
test_output_img = np.dot(test_output_img, activity_to_img_weights) #Map the activity to the visual representation
pylab.imshow(np.reshape(test_output_img,(28,28), 'F').T, cmap=plt.get_cmap('Greys_r'))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the weight matrices from the training
Step2: Visualize the digit from one hot representation through the activity weight matrix to the image representation
Step3: Visualize the rotation of the image using the weight matrix from activity to activity
|
7,597
|
<ASSISTANT_TASK:>
Python Code:
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import urllib2
import scipy.stats as stats
np.set_printoptions(precision=3, suppress=True)
url = ('https://raw.githubusercontent.com/Upward-Spiral-Science'
'/data/master/syn-density/output.csv')
data = urllib2.urlopen(url)
csv = np.genfromtxt(data, delimiter=",")[1:] # don't want first row (labels)
# chopping data based on thresholds on x and y coordinates
x_bounds = (409, 3529)
y_bounds = (1564, 3124)
def check_in_bounds(row, x_bounds, y_bounds):
if row[0] < x_bounds[0] or row[0] > x_bounds[1]:
return False
if row[1] < y_bounds[0] or row[1] > y_bounds[1]:
return False
if row[3] == 0:
return False
return True
indices_in_bound, = np.where(np.apply_along_axis(check_in_bounds, 1, csv, x_bounds, y_bounds))
data_thresholded = csv[indices_in_bound]
n = data_thresholded.shape[0]
total_unmasked = np.sum(data_thresholded[:, 3])
total_syn = np.sum(data_thresholded[:, 4])
print "avg density per voxel: ", total_syn/total_unmasked
a = np.apply_along_axis(lambda x:x[4]/x[3], 1, data_thresholded)
print "average per bin: ", np.average(a), ", std dev: ", np.std(a)
print "max/min bin density: ", np.max(a), ", ", np.min(a)
print np.sum(a)
hist_n, bins, _ = plt.hist(a, 5000)
plt.xlim(-.0001, .0035)
plt.show()
print "fraction: ", hist_n[0]/len(a)
ux = np.unique(data_thresholded[:, 0]).shape[0]
uy = np.unique(data_thresholded[:, 1]).shape[0]
uz = np.unique(data_thresholded[:, 2]).shape[0]
exp = ux*uy*uz
actual = data_thresholded.shape[0]
print "actual: %d, expected: %d, difference: %d" % (actual, exp, exp-actual)
# chi squared test on all bins
def synapses_over_unmasked(row):
s = (row[4]/row[3])*(64**3)
return [row[0], row[1], row[2], s]
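# Note: row[4]/row[3] is synapses per unmasked voxel; the 64**3 factor (presumably the
# voxel count of one bin) rescales it to an approximate synapse count per bin volume.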
syn_unmasked = np.apply_along_axis(synapses_over_unmasked, 1, data_thresholded)
# divide synapses/unmasked by std_dev
# syn_normalized = np.apply_along_axis(normalize_syn, 1, syn_unmasked,
# np.mean(syn_unmasked[:,-1]), np.std(syn_unmasked[:,-1]))
syn_normalized = syn_unmasked
sum_syn_norm = np.sum(syn_normalized[:, 3])
avg_syn_norm = (sum_syn_norm/n)*np.ones((n))
syn_norm_1col = syn_normalized[:, -1]
print avg_syn_norm
print syn_norm_1col
print stats.chisquare(syn_norm_1col, avg_syn_norm)
def marginalize_along_axis(axis):
unique = np.unique(syn_normalized[:, axis])
idx_dict = dict(zip(unique, range(len(unique))))
syn_per_unique = np.zeros(len(unique))
for row in syn_normalized[:,]:
syn_per_unique[idx_dict[row[axis]]] += row[-1]
return syn_per_unique
for axis in range(3):
marginalized_data = marginalize_along_axis(axis)
n = len(np.unique(syn_normalized[:, axis]))
print "n=", n
avg = sum_syn_norm/n
avg_vec = np.ones((n))*avg
print "axis = ", axis, "chisquare test: ", stats.chisquare(marginalized_data, avg_vec)
# load our regressions
from sklearn.linear_model import LinearRegression
from sklearn.svm import LinearSVR
from sklearn.neighbors import KNeighborsRegressor as KNN
from sklearn.ensemble import RandomForestRegressor as RF
from sklearn.preprocessing import PolynomialFeatures as PF
from sklearn.pipeline import Pipeline
from sklearn import cross_validation
np.random.seed(1)
names = ['Linear Regression','SVR','KNN Regression','Random Forest Regression','Polynomial Regression']
regressions = [LinearRegression(),
LinearSVR(C=1.0),
KNN(n_neighbors=10, algorithm='auto'),
RF(max_depth=5, max_features=1),
Pipeline([('poly', PF(degree=2)),('linear', LinearRegression(fit_intercept=False))])]
k_fold = 10
def normalize_syn(row, pos, scale):
row[-1] -= pos
row[-1] *= 1.0/scale
return row
syn_normalized = np.apply_along_axis(normalize_syn, 1, syn_unmasked,
np.mean(syn_unmasked[:,-1]), np.std(syn_unmasked[:,-1]))
syn_normalized = np.apply_along_axis(normalize_syn, 1, syn_unmasked,
np.min(syn_unmasked[:,-1]),
np.max(syn_unmasked[:,-1])-np.min(syn_unmasked[:,-1]))
syn_normalized = syn_unmasked
X = syn_normalized[:, [0, 1, 2]]
Y = syn_normalized[:, -1]
for name, reg in zip(names, regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
for i in xrange(3):
X = syn_normalized[:, i].reshape(-1, 1)
Y = syn_normalized[:, -1]
print i
for name, reg in zip(names, regressions):
scores = cross_validation.cross_val_score(reg, X, Y, scoring='r2', cv=k_fold)
print("R^2 of %s: %0.2f (+/- %0.2f)" % (name, scores.mean(), scores.std() * 2))
print
import sklearn.mixture as mixture
max_clusters = 15
bic = np.array([])
i = np.array(range(1, max_clusters))
for idx in range(1, max_clusters):
gmm = mixture.GMM(n_components=idx, n_iter=1000, covariance_type='diag', random_state=1)
gmm.fit(syn_normalized)
bic = np.append(bic, gmm.bic(syn_normalized))
print bic
plt.figure(figsize=(7,7))
plt.plot(i, 1.0/bic)
plt.title('BIC')
plt.ylabel('score')
plt.xlabel('number of clusters')
plt.show()
# to keep model as simple as possible, let's assume 4 clusters
n_clusters = 4
gmm = mixture.GMM(n_components=n_clusters, n_iter=1000, covariance_type='diag', random_state=1)
clusters = [[] for i in xrange(n_clusters)]
predicted = gmm.fit_predict(syn_normalized)
for label, row in zip(predicted, syn_normalized[:,]):
clusters[label].append(row)
for i in xrange(n_clusters):
clusters[i] = np.array(clusters[i])
print "# of samples in cluster %d: %d" % (i+1, len(clusters[i]))
print "centroid: ", np.average(clusters[i], axis=0)
print "cluster covariance: "
covar = np.cov(clusters[i].T)
print covar
print "determinant of covariance matrix: ", np.linalg.det(covar)
print
# compare diagonal covariances computed to whats returned by the GMM
print gmm.covars_
# check if uniform distribution within cluster
for cluster in clusters:
sum_syn = np.sum(cluster[:, -1])
avg_syn_vec = (sum_syn/cluster.shape[0])*np.ones((cluster.shape[0]))
print stats.chisquare(cluster[:, -1], avg_syn_vec)
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
ax.view_init()
ax.dist = 10 # distance
ax.scatter(
    syn_normalized[:, 0], syn_normalized[:, 1], syn_normalized[:, 2],  # data
    c=predicted,  # marker colour
    alpha=.5
)
plt.show()
#TODO: graph the centroids and diagonal covariances as ellipsoids
syn_normalized = syn_unmasked
uniques = [np.unique(syn_normalized[:, i]) for i in xrange(3)]
coord_mapping = {}
for xi, x in enumerate(uniques[0]):
    for yi, y in enumerate(uniques[1]):
        for zi, z in enumerate(uniques[2]):
            coord_mapping[(x, y, z)] = (xi, yi, zi)
gridded_data = np.empty((len(uniques[0]), len(uniques[1]), len(uniques[2])))
for row in syn_normalized[:, ]:
    coord = coord_mapping[tuple(row[:3])]
    gridded_data[coord[0], coord[1], coord[2]] = row[-1]
dx = uniques[0][1]-uniques[0][0]
dy = uniques[1][1]-uniques[1][0]
dz = uniques[2][1]-uniques[2][0]
grad = np.gradient(gridded_data, dx, dy, dz)
def get_gradient_components(x, y, z):
    u = grad[0][x, y, z]
    v = grad[1][x, y, z]
    w = grad[2][x, y, z]
    return (u, v, w)
x, y, z = np.meshgrid(np.arange(1, 41, 4), np.arange(1, 81, 8), np.arange(1, 11))
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
ax.quiver(x, y, z, grad[0][1::8, 1::4, 1:], grad[1][1::8, 1::4, 1:], grad[2][1::8, 1::4, 1:],
          length=2, color="Tomato", alpha=.8, arrow_length_ratio=.4)
ax.view_init(elev=18, azim=30)
ax.dist = 8
plt.show()
print "Avg. gradient: (%.4f, %.4f, %.4f)" % (np.average(grad[0]), np.average(grad[1]), np.average(grad[2]))
print "Std. dev per element: (%.4f, %.4f, %.4f)" % (np.std(grad[0]), np.std(grad[1]), np.std(grad[2]))
print grad[0]
from sklearn.decomposition import PCA
# center each variable and give it unit variance for PCA
def center(row, means, std_devs):
    for idx, mean, std_dev in zip(range(4), means, std_devs):
        row[idx] -= mean
        row[idx] *= 1.0/std_dev
    return row
syn_centered = np.apply_along_axis(center, 1, syn_normalized,
                                   *zip(*[(np.average(syn_normalized[:, i]),
                                           np.std(syn_normalized[:, i])) for i in range(4)]))
pca = PCA(n_components = 4)
transform = pca.fit_transform(syn_centered)
print pca.components_
print pca.explained_variance_ratio_
print transform.shape
# plot the clusters along the first 2 principal components
n_clusters = 4
gmm = mixture.GMM(n_components=n_clusters, n_iter=1000, covariance_type='diag', random_state=1)
predicted = gmm.fit_predict(syn_normalized)
plt.scatter(transform[:, 0], transform[:, 1], c=predicted, alpha=.3)
plt.show()
n_clusters = 12
gmm = mixture.GMM(n_components=n_clusters, n_iter=1000, covariance_type='diag', random_state=1)
predicted = gmm.fit_predict(syn_normalized)
plt.scatter(transform[:, 0], transform[:, 1], c=predicted, alpha=.3)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) What is the average synapse density per voxel? How does it compare to average and min/max synapse density per bin?
Step2: Note the 2 large spikes in the histogram.
Step3: 2) Is joint distribution uniform?
Step4: Conclude that the joint distribution is not uniform.
Step6: Conclude that none of the marginals are uniform.
Step7: Overall, the regressions were not successful.
Step8: 5) What is the optimal number of clusters on the 'cleaned' data?
Step9: Observe local maxima of 1/BIC at 4 and 12 clusters (a sketch of this model selection with the current scikit-learn API follows this list).
Step10: Observations
Step11: 7) Are distribution within clusters uniform?
Step12: 8) Graph clusters in Euclidean 3D Space
Step13: 9) Can we estimate the gradient of density?
Step14: 9.1) What's the average gradient? What's the standard dev of each component of the gradient?
Step15: 10) PCA
Step16: Observe a fairly well-defined boundary between the clusters. Let's plot the 2D PCA projection with 12 clusters as well.
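A note on the clustering code above: sklearn.cross_validation and sklearn.mixture.GMM come from an older scikit-learn release and have since been removed (replaced by sklearn.model_selection and sklearn.mixture.GaussianMixture). The following is a minimal sketch of the same BIC-based choice of cluster count with the current API; it assumes syn_normalized is the (n_samples, 4) array built above, and the function name and parameter values are illustrative rather than taken from the notebook.
from sklearn.mixture import GaussianMixture  # current replacement for the removed mixture.GMM
def select_n_components(data, max_clusters=15, seed=1):
    # Fit a diagonal-covariance Gaussian mixture for each candidate cluster count and
    # record its BIC; a lower BIC means a better fit/complexity trade-off, which is
    # why the notebook plots 1/BIC and looks for maxima.
    bics = {}
    for k in range(1, max_clusters):
        gmm = GaussianMixture(n_components=k, covariance_type='diag',
                              max_iter=1000, random_state=seed)
        gmm.fit(data)
        bics[k] = gmm.bic(data)
    return bics
# Example usage: bics = select_n_components(syn_normalized); best_k = min(bics, key=bics.get)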
|
7,598
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%sql mysql://steinam:steinam@localhost/versicherung_complete
%%sql
-- my solution
select distinct(Land) from Fahrzeughersteller;
%%sql
-- your solution
select fahrzeughersteller.Land
from fahrzeughersteller
group by fahrzeughersteller.Land
;
%%sql
-- my solution
select fahrzeugtyp.Bezeichnung, count(fahrzeug.iD) as Anzahl
from fahrzeugtyp left join fahrzeug
on fahrzeugtyp.id = fahrzeug.fahrzeugtyp_id
group by fahrzeugtyp.bezeichnung
having count(Anzahl) > 2
%%sql
select *, (select count(*) from fahrzeug
where fahrzeug.fahrzeugtyp_id = fahrzeugtyp.id) as Fahrzeuge
from fahrzeugtyp
having Fahrzeuge > 2
order by fahrzeugtyp.bezeichnung;
%%sql
-- my solution
-- select ID from Abteilung where Abteilung.Ort = 'Dortmund' or abteilung.Ort = 'Bochum'
select Name, vorname, Bezeichnung, Abteilung.ID, Mitarbeiter.Abteilung_ID,
Abteilung.Ort from Mitarbeiter inner join Abteilung
on Mitarbeiter.Abteilung_ID = Abteilung.ID
where Abteilung.Ort in('Dortmund', 'Bochum')
order by Name
%%sql
-- your solution
select mitarbeiter.Name, mitarbeiter.Vorname,
(select abteilung.bezeichnung
from abteilung where abteilung.id = mitarbeiter.abteilung_id) as Abteilung,
(select abteilung.ort
from abteilung where abteilung.id = mitarbeiter.abteilung_id) as Standort
from mitarbeiter having Standort = "Dortmund" or Standort = "Bochum";
%%sql
-- my solution
select fahrzeughersteller.id, year(datum) as Jahr,
min(zuordnung_sf_fz.schadenshoehe),
max(zuordnung_sf_fz.Schadenshoehe),
(max(zuordnung_sf_fz.schadenshoehe) - min(zuordnung_sf_fz.schadenshoehe)) as Differenz
from fahrzeughersteller left join fahrzeugtyp
on fahrzeughersteller.id = fahrzeugtyp.hersteller_ID
inner join fahrzeug on fahrzeugtyp.id = fahrzeug.fahrzeugtyp_id
inner join zuordnung_sf_fz
on fahrzeug.id = zuordnung_sf_fz.fahrzeug_id
inner join schadensfall on schadensfall.id = zuordnung_sf_fz.schadensfall_id
group by fahrzeughersteller.id, year(datum)
%%sql
-- Wortmann's revised version; this one works
select
fahrzeughersteller.Name,
(select min(zuordnung_sf_fz.schadenshoehe) from zuordnung_sf_fz
where zuordnung_sf_fz.fahrzeug_id in(
select fahrzeug.id from fahrzeug
where fahrzeug.fahrzeugtyp_id in(
select fahrzeugtyp.id from fahrzeugtyp
where fahrzeugtyp.hersteller_id = fahrzeughersteller.id
)
)
) as Kleinste,
(select max(zuordnung_sf_fz.schadenshoehe) from zuordnung_sf_fz
where zuordnung_sf_fz.fahrzeug_id in(
select fahrzeug.id from fahrzeug
where fahrzeug.fahrzeugtyp_id in(
select fahrzeugtyp.id from fahrzeugtyp
where fahrzeugtyp.hersteller_id = fahrzeughersteller.id
)
)
) as `Groesste`
from fahrzeughersteller;
%%sql
select Mitarbeiter.Name, dienstwagen.Kennzeichen
from Mitarbeiter inner join dienstwagen
on mitarbeiter.id = dienstwagen.Mitarbeiter_id
inner join fahrzeugtyp
on dienstwagen.fahrzeugtyp_Id = fahrzeugtyp.id
inner join fahrzeughersteller
on fahrzeugtyp.hersteller_id = fahrzeughersteller.id
where Fahrzeughersteller.Name = 'Opel'
%%sql
select * from mitarbeiter
where mitarbeiter.id in(
select dienstwagen.mitarbeiter_id from dienstwagen
where
dienstwagen.mitarbeiter_id = mitarbeiter.id
and dienstwagen.fahrzeugtyp_id in(
select fahrzeugtyp.id from fahrzeugtyp
where fahrzeugtyp.hersteller_id in(
select fahrzeughersteller.id from fahrzeughersteller
where fahrzeughersteller.name = "Opel"
)
)
)
%%sql
select fahrzeug.kennzeichen, sum(schadenshoehe)
from fahrzeug inner join zuordnung_sf_fz
on fahrzeug.id = zuordnung_sf_fz.fahrzeug_id
group by fahrzeug.kennzeichen
having sum(schadenshoehe) > (select avg(schadenshoehe) from zuordnung_sf_fz)
%%sql
-- your solution (Wortmann)
/*
select * from fahrzeug having fahrzeug.id in(
select zuordnung_sf_zf.fahrzeugtyp_id from zuordnung_sf_zf
where zuordnung_sf_zf.schadenhoehe > ((select sum(zuordnung_sf_zf.schadenhoehe) from zuordnung_sf_zf)) / (select count(*) from zuordnung_sf_zf))
*/
select * from fahrzeug having fahrzeug.id in(
select zuordnung_sf_fz.fahrzeug_id from zuordnung_sf_fz
where zuordnung_sf_fz.schadenshoehe > ((select sum(zuordnung_sf_fz.schadenshoehe) from zuordnung_sf_fz)) / (select count(*) from zuordnung_sf_fz))
%%sql
select Mitarbeiter.Name, Mitarbeiter.Geburtsdatum
from Mitarbeiter
where Geburtsdatum < (select avg(Geburtsdatum) from Mitarbeiter ma)
order by Mitarbeiter.Name
%%sql
-- this also works
select ma.Name, ma.Geburtsdatum
from Mitarbeiter ma
where (now() - ma.Geburtsdatum) > (now() - (select avg(geburtsdatum) from mitarbeiter))
order by ma.Name;
%%sql
-- your solution (Wortmann)
select * from mitarbeiter
having mitarbeiter.geburtsdatum < (select sum(mitarbeiter.geburtsdatum) from mitarbeiter) / (select count(*) from mitarbeiter)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Wanted: a duplicate-free list of the manufacturers' countries. (3 points)
Step2: List every vehicle type and the number of vehicles of that type, but only if more than 2 vehicles of the type exist. Sort the output by vehicle type. (4 points)
Step3: Determine the last and first names of the employees, including the department name, whose department is based in Dortmund or Bochum.
Step4: For every vehicle manufacturer (giving the ID is sufficient) and every year, find the smallest and largest claim amount.
Step5: Show all employees, and their license plates, who drive an Opel as a company car. (See the sketch after this list for running these queries outside the %sql magic.)
Step6: Which vehicles have caused claims whose total is higher than the average claim amount? (5 points)
Step7: Which employees are older than the average age of the employees? (4 points)
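The same queries can be run without the %sql magic. A minimal sketch, assuming the local MySQL database and credentials from the connection line above (mysql://steinam:steinam@localhost/versicherung_complete); the pymysql driver and the choice of the first exercise's query are illustrative assumptions, not part of the notebook.
import pandas as pd
from sqlalchemy import create_engine
# Connection string mirrors the %sql line above; the "+pymysql" driver is an assumption.
engine = create_engine("mysql+pymysql://steinam:steinam@localhost/versicherung_complete")
# Exercise 1: duplicate-free list of manufacturer countries
query = "SELECT DISTINCT Land FROM Fahrzeughersteller ORDER BY Land"
countries = pd.read_sql(query, engine)  # one row per distinct country, as a DataFrame
print(countries)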
|
7,599
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import *
from numpy import *
def problem08(start, max_value, addition):
    true_val = pi**4/90
    total = 0
    i = start
    # Loop until i reaches max_value (works for both the ascending and descending calls)
    while i > max_value or i < max_value:
        # Always add a float32 (single-precision) value to the total
        total += np.float32(1/(i**4))
        i += addition
    # Numerical true error
    enum = np.float32(true_val) - np.float32(total)
    # True percent relative error
    eper = enum / true_val
    print("End result is {}, true percent relative error {} %".format(np.float32(total), eper * 100))
problem08(1, 10000, 1)
problem08(10000, 0, -1)
def problem01(selection):
    if selection == "a":
        print("Matti")
    elif selection == "b":
        print("Teppo")
    elif selection == "c":
        # Plot f(x) = x^5 - x^2 + 2 on [-2, 2]
        x = np.arange(-2, 2, 0.001)
        y = x**5 - x**2 + 2
        # Create the graph
        plt.plot(x, y, marker=".", linestyle=":")
        # Mark the known root at x = -1 (f(-1) = -1 - 1 + 2 = 0)
        plt.plot(-1, 0.0, 'ro:')
        # Mark the known local minimum
        plt.plot(0.737007, 1.67427, 'yo:')
        plt.grid()
        plt.show()
    else:
        print("Invalid selection")
#problem01("a")
#problem01("b")
problem01("c")
def problem02(selection):
    if selection == "a":
        x = np.arange(-1, 3, 0.0001)
        y1 = x ** 3
        y2 = 4 - 2 * x
        plt.plot(x, y1)
        plt.plot(x, y2)
        # Indices where y1 - y2 changes sign, i.e. where the curves intersect
        idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0).reshape(-1)
        plt.plot(x[idx], y1[idx], 'ro')
        print(x[idx])
        print(y1[idx])
        plt.grid()
        plt.show()
    elif selection == "b":
        x = np.arange(-1, 3, 0.001)
        y = x ** 3 + 2 * x - 4
        plt.plot(x, y)
        plt.grid()
        plt.show()
    else:
        print("Invalid selection")
problem02("a")
#problem02("b")
def problem03(selection):
    if selection == "a":
        x = np.arange(-4, 4, 0.01)
        y = (2 * x + 1)/(x - 3)
        # The function blows up near x = 3, so limit the visible y range
        ylim([-50,50])
        xlim([-4, 4])
        plt.plot(x, y)
        # Draw the asymptotes x = 3 and y = 2
        plt.plot((3, 3), (-100, 100), 'r-')
        plt.plot((-100, 100), (2, 2), 'r-')
        plt.grid()
        plt.show()
    elif selection == "b":
        print("No calculations")
    else:
        print("Invalid selection")
problem03("a")
#problem03("b")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 08
Step2: Results without specifying single precision (float32). (A standalone sketch comparing the two summation orders in float32 follows this list.)
Step3: From the graph we can see that the equation has only one real root, at x = -1 (where y = 0).
Step4: Actual intersection of y = x^3 and y = 4 - 2x.
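The two problem08 calls above add exactly the same terms of the series sum 1/i^4 = pi^4/90, just in opposite orders, and the descending call is the more accurate one: in single precision, terms below roughly 1e-7 stop contributing once the running total is already near 1.08, so summing the small terms first preserves more of their contribution. A standalone sketch of the comparison (it sums i = 1..9999 in both directions so that only the ordering differs; the exact loop bounds of the calls above differ slightly):
import numpy as np
true_val = np.pi**4 / 90  # exact limit of the series
def series_sum(indices):
    # Accumulate 1/i^4 in float32, in the given index order
    total = np.float32(0.0)
    for i in indices:
        total += np.float32(1.0 / i**4)
    return total
forward = series_sum(range(1, 10000))      # largest terms first
backward = series_sum(range(9999, 0, -1))  # smallest terms first
for name, s in [("forward", forward), ("backward", backward)]:
    rel_err = (true_val - float(s)) / true_val * 100
    print("%s sum: %.8f, true relative error: %.6f %%" % (name, s, rel_err))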
|