markdown
stringlengths
0
1.02M
code
stringlengths
0
832k
output
stringlengths
0
1.02M
license
stringlengths
3
36
path
stringlengths
6
265
repo_name
stringlengths
6
127
Sentiment for individual stocks
# Plot normalized and unnormalized sentiment for each of the forum's
# favourite stocks (cell was flattened to one line; also renames the
# misleading `gme_posts` -- it holds posts for *every* ticker in turn).
stocks = ["GME", "AMC", "AMD", "AMZN", "PLTR", "NVDA"]
for stock in stocks:
    # select all posts whose body mentions the ticker
    stock_posts = df.loc[df["body"].str.contains(stock), :]
    plot_sentiment(stock_posts, title=f"{stock}-normalized", normalize=True)
    plot_sentiment(stock_posts, title=f"{stock}-unnormalized", normalize=False)
_____no_output_____
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
Analyzing Stock Data
def get_daily_sentiment(df, stock):
    """Return a DataFrame indexed by every day of August 2021 holding the
    number of posts mentioning `stock` in each sentiment bin plus the total
    daily post count; days with no posts are filled with 0."""
    # initialize df with all dates in August 2021
    datelist = pd.date_range(datetime(2021, 8, 1), periods=31).tolist()
    sentiment_df = pd.DataFrame({"date": datelist}).set_index("date")
    # get all posts mentioning the stock
    posts = df.loc[df["body"].str.contains(stock), :]
    bins, counts = sentiment_bins(posts)
    # get the number of posts in each bin per day
    # (removed the original's no-op `values = values` line)
    for name, values in bins.items():
        values.index = pd.to_datetime(values.index)
        values = values.rename(columns={"sentiment": name})
        sentiment_df = sentiment_df.join(values)
    # get the total number of posts for each day
    counts.index = pd.to_datetime(counts.index)
    counts = counts.rename(columns={"sentiment": "count"})
    sentiment_df = sentiment_df.join(counts)
    return sentiment_df.fillna(0)


def load_stocks(filename="drive/MyDrive/Stock Prices.csv"):
    """Load daily stock prices from `filename`, reindex onto every day of
    August 2021 and interpolate the missing (non-trading) days from the
    nearest available date."""
    stonks = pd.read_csv(filename)
    # add missing dates to the frame
    stonks.Date = pd.to_datetime(stonks.Date)
    datelist = pd.date_range(datetime(2021, 8, 1), periods=31).tolist()
    dates = pd.DataFrame({"Date": datelist})
    stonks_df = pd.merge(dates, stonks, on="Date", how="left")
    # fill the null values using the closest date
    stonks_df = stonks_df.interpolate(method='nearest')
    return stonks_df.set_index('Date')


# the forum's favourite stocks identified earlier
stocks = ["GME", "AMC", "AMD", "AMZN", "PLTR", "NVDA"]
# retrieve the sentiment information for each stock
sentiment_df = {stock: get_daily_sentiment(df, stock) for stock in stocks}
stonks_df = load_stocks()
_____no_output_____
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
visualize stock prices
def scale(x):
    """Min-max scale `x` into [0, 1].

    NOTE(review): divides by (max - min); a constant input yields a
    zero division -- assumed not to occur for these series. TODO confirm.
    """
    minx = np.min(x)
    maxx = np.max(x)
    return (x - minx) / (maxx - minx)


# scaled stock price vs (scaled) number of posts, per ticker
for stock in stocks:
    price = stonks_df.dropna(axis=0)[[stock]]
    num_posts = sentiment_df[stock]["count"].loc[price.index, ].values
    plt.plot(price.index, scale(price), alpha=0.7)
    plt.plot(price.index, scale(num_posts), alpha=0.7)
    plt.xticks(rotation=20)
    plt.title(f"{stock}: scaled stock price vs number of posts")
    plt.legend(["stock price", "posts count"])
    plt.show()

# daily returns and log-returns vs number of posts, all min-max scaled
stonks_log = np.log(stonks_df)
for stock in stocks:
    returns = stonks_df.diff().dropna(axis=0)[[stock]]
    log_returns = stonks_log.diff().dropna(axis=0)[[stock]]
    num_posts = sentiment_df[stock]["count"].loc[returns.index, ].values
    plt.plot(returns.index, scale(returns), alpha=0.7)
    plt.plot(returns.index, scale(log_returns), alpha=0.7)
    plt.plot(returns.index, scale(num_posts), alpha=0.7)
    plt.xticks(rotation=20)
    plt.title(f"{stock}: returns vs number of posts, scaled")
    plt.legend(["returns", "log returns", "posts count"])
    plt.show()
_____no_output_____
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
sentiment vs stock prices
import statsmodels.api as sm

# --- univariate fits: stock price vs each sentiment-bin count -----------
for stock in stocks:
    # get stock prices as the regression target
    y = stonks_df.dropna(axis=0)[[stock]]
    print("=" * 50)
    print(f'name: {stock}, total:{sum(sentiment_df[stock]["count"])}')
    print("=" * 50)
    for col in sentiment_df[stock].columns:
        # fit a linear model using the number of posts in this bin
        X = sentiment_df[stock][col].loc[y.index, ].values
        X = sm.add_constant(X)
        res = sm.OLS(y, X).fit()
        print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')

# --- normalize bin counts by the daily total ----------------------------
norm_df = {}
for stock in sentiment_df:
    norm_df[stock] = sentiment_df[stock].copy()
    for col in norm_df[stock].columns:
        if col != "count":
            norm_df[stock][col] = norm_df[stock][col] / norm_df[stock]["count"]

# --- multivariate fit: price vs all normalized features -----------------
names = ["intercept"] + list(norm_df["GME"].columns)
for stock in stocks:
    y = stonks_df.dropna(axis=0)[[stock]]
    print("=" * 50)
    print(f'name: {stock}, total:{sum(sentiment_df[stock]["count"])}')
    print("=" * 50)
    X = norm_df[stock].loc[y.index, :].values
    X = sm.add_constant(X)
    res = sm.OLS(y, X).fit()
    print(f'rsquared: {res.rsquared:.3f}')
    for name, pval in zip(names, res.pvalues):
        print(f'{name}: {pval:.3f}')
================================================== name: GME, total:7867 ================================================== rsquared: 0.381 intercept: 0.000 positive: 0.111 negative: 0.100 neutral: 0.028 count: 0.056 ================================================== name: AMC, total:5130 ================================================== rsquared: 0.206 intercept: 0.000 positive: 0.178 negative: 0.371 neutral: 0.061 count: 0.229 ================================================== name: AMD, total:3060 ================================================== rsquared: 0.511 intercept: 0.000 positive: 0.000 negative: 0.003 neutral: 0.000 count: 0.000 ================================================== name: AMZN, total:1330 ================================================== rsquared: 0.081 intercept: 0.000 positive: 0.000 negative: 0.000 neutral: 0.000 count: 0.313 ================================================== name: PLTR, total:3223 ================================================== rsquared: 0.021 intercept: 0.000 positive: 0.001 negative: 0.135 neutral: 0.054 count: 0.822 ================================================== name: NVDA, total:1799 ================================================== rsquared: 0.104 intercept: 0.000 positive: 0.000 negative: 0.117 neutral: 0.000 count: 0.278
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
sentiment vs stock direction
from sklearn.metrics import roc_auc_score

# --- univariate logistic fits: did the stock go up that day? ------------
stonks_diff = stonks_df.diff()
for stock in stocks:
    # binary target: 1 if the stock increased
    y = (stonks_diff.dropna(axis=0)[[stock]] > 0) * 1
    print("=" * 50)
    print(f'name: {stock}, total:{sum(sentiment_df[stock]["count"])}')
    print("=" * 50)
    for col in sentiment_df[stock].columns:
        X = sentiment_df[stock][col].loc[y.index, ].values
        X = sm.add_constant(X)
        log_reg = sm.Logit(y, X).fit(disp=False)
        ypred = log_reg.predict(X)
        score = roc_auc_score(y.values, ypred)
        acc = np.mean((ypred > 0.5) == y.values)
        print(f'{col}: auc:{score:.3f}, acc:{acc:.3f}')

# --- multivariate logistic fit on all normalized features ---------------
names = ["intercept"] + list(norm_df["GME"].columns)
for stock in stocks:
    y = (stonks_diff.dropna(axis=0)[[stock]] > 0) * 1
    print("=" * 50)
    print(f'name: {stock}, total:{sum(sentiment_df[stock]["count"])}')
    print("=" * 50)
    X = norm_df[stock].loc[y.index, :].values
    X = sm.add_constant(X)
    log_reg = sm.Logit(y, X).fit(disp=False)
    ypred = log_reg.predict(X)
    score = roc_auc_score(y.values, ypred)
    acc = np.mean((ypred > 0.5) == y.values)
    # BUG FIX: the original printed the stale loop variable `col` left over
    # from the first loop ("count: ..."), mislabelling the combined model.
    print(f'all features: auc:{score:.3f}, acc:{acc:.3f}')
================================================== name: GME, total:7867 ================================================== count: auc:0.639, acc:0.612 ================================================== name: AMC, total:5130 ================================================== count: auc:0.561, acc:0.592 ================================================== name: AMD, total:3060 ================================================== count: auc:0.644, acc:0.612 ================================================== name: AMZN, total:1330 ================================================== count: auc:0.839, acc:0.571 ================================================== name: PLTR, total:3223 ================================================== count: auc:0.740, acc:0.515 ================================================== name: NVDA, total:1799 ================================================== count: auc:0.626, acc:0.584
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
sentiment vs returns
# --- univariate fits: daily returns vs each sentiment-bin count ---------
stonks_diff = stonks_df.diff()
for stock in stocks:
    y = stonks_diff.dropna(axis=0)[[stock]]
    print("=" * 50)
    print(f'name: {stock}')
    print("=" * 50)
    for col in sentiment_df[stock].columns:
        X = sentiment_df[stock][col].loc[y.index, ].values
        X = sm.add_constant(X)
        res = sm.OLS(y, X).fit()
        print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')

# --- multivariate fit: returns vs all normalized daily features ---------
names = ["intercept"] + list(norm_df["GME"].columns)
for stock in stocks:
    y = stonks_diff.dropna(axis=0)[[stock]]
    print("=" * 50)
    print(f'name: {stock}, total:{sum(sentiment_df[stock]["count"])}')
    print("=" * 50)
    X = norm_df[stock].loc[y.index, :].values
    X = sm.add_constant(X)
    res = sm.OLS(y, X).fit()
    print(f'rsquared: {res.rsquared:.3f}')
    for name, pval in zip(names, res.pvalues):
        print(f'{name}: {pval:.3f}')
================================================== name: GME, total:7867 ================================================== rsquared: 0.314 intercept: 0.271 positive: 0.314 negative: 0.761 neutral: 0.503 count: 0.005 ================================================== name: AMC, total:5130 ================================================== rsquared: 0.059 intercept: 0.453 positive: 0.603 negative: 0.601 neutral: 0.885 count: 0.291 ================================================== name: AMD, total:3060 ================================================== rsquared: 0.082 intercept: 0.481 positive: 0.763 negative: 0.698 neutral: 0.903 count: 0.163 ================================================== name: AMZN, total:1330 ================================================== rsquared: 0.165 intercept: 0.770 positive: 0.436 negative: 0.251 neutral: 0.046 count: 0.308 ================================================== name: PLTR, total:3223 ================================================== rsquared: 0.284 intercept: 0.531 positive: 0.355 negative: 0.895 neutral: 0.466 count: 0.007 ================================================== name: NVDA, total:1799 ================================================== rsquared: 0.049 intercept: 0.855 positive: 0.642 negative: 0.634 neutral: 0.981 count: 0.300
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
sentiment vs log returns
# Univariate fits of daily log-returns against each sentiment-bin count.
stonks_log = np.log(stonks_df)
for stock in stocks:
    y = stonks_log.diff().dropna(axis=0)[[stock]]
    print("=" * 50)
    print(f'name: {stock}')
    print("=" * 50)
    for col in sentiment_df[stock].columns:
        X = sentiment_df[stock][col].loc[y.index, ].values
        X = sm.add_constant(X)
        res = sm.OLS(y, X).fit()
        print(f'{col}: {res.rsquared:.3f}, pval: {res.pvalues.x1:.3f}')
================================================== name: GME ================================================== positive: 0.287, pval: 0.003 negative: 0.212, pval: 0.014 neutral: 0.289, pval: 0.003 count: 0.272, pval: 0.004 ================================================== name: AMC ================================================== positive: 0.035, pval: 0.341 negative: 0.003, pval: 0.775 neutral: 0.057, pval: 0.219 count: 0.035, pval: 0.342 ================================================== name: AMD ================================================== positive: 0.071, pval: 0.171 negative: 0.047, pval: 0.270 neutral: 0.081, pval: 0.143 count: 0.071, pval: 0.171 ================================================== name: AMZN ================================================== positive: 0.014, pval: 0.554 negative: 0.056, pval: 0.225 neutral: 0.003, pval: 0.797 count: 0.017, pval: 0.512 ================================================== name: PLTR ================================================== positive: 0.294, pval: 0.003 negative: 0.202, pval: 0.016 neutral: 0.202, pval: 0.016 count: 0.243, pval: 0.008 ================================================== name: NVDA ================================================== positive: 0.031, pval: 0.372 negative: 0.050, pval: 0.253 neutral: 0.041, pval: 0.300 count: 0.042, pval: 0.297
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
The
_____no_output_____
MIT
wsb_sentiment.ipynb
kenzeng24/wsb-analysis
NumPy arrays Nikolay Koldunovkoldunovn@gmail.com This is part of [**Python for Geosciences**](https://github.com/koldunovn/python_for_geosciences) notes. ================ - a powerful N-dimensional array object- sophisticated (broadcasting) functions- tools for integrating C/C++ and Fortran code- useful linear algebra, Fourier transform, and random number capabilities
#allow graphics inline %matplotlib inline import matplotlib.pylab as plt #import plotting library import numpy as np #import numpy library np.set_printoptions(precision=3) # this is just to make the output look better
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Load data I am going to use some real data as an example of array manipulations. This will be the AO index downloaded by wget through a system call (you have to be on Linux of course):
!wget www.cpc.ncep.noaa.gov/products/precip/CWlink/daily_ao_index/monthly.ao.index.b50.current.ascii
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
This is how data in the file look like (we again use system call for *head* command):
!head monthly.ao.index.b50.current.ascii
1950 1 -0.60310E-01 1950 2 0.62681E+00 1950 3 -0.81275E-02 1950 4 0.55510E+00 1950 5 0.71577E-01 1950 6 0.53857E+00 1950 7 -0.80248E+00 1950 8 -0.85101E+00 1950 9 0.35797E+00 1950 10 -0.37890E+00
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Load data in to a variable:
# load the AO index into a 2D array: columns are year, month, index value
ao = np.loadtxt('monthly.ao.index.b50.current.ascii')
ao
ao.shape
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
So it's a *row-major* order. Matlab and Fortran use *column-major* order for arrays.
type(ao)  # show the Python type of the loaded array
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Numpy arrays are statically typed, which allow faster operations
ao.dtype  # element type -- the whole array shares one dtype
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You can't assign value of different type to element of the numpy array:
ao[0,0] = 'Year'  # deliberately raises: a string cannot go into a numeric array
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Slicing works similarly to Matlab:
ao[0:5,:]  # first five rows, all columns
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
One can look at the data. This is done by the matplotlib.pylab module that we imported in the beginning as `plt`. We will plot only the first 780 points:
plt.plot(ao[:780,2])  # plot the first 780 values of the index (third column)
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Index slicing In general it is similar to Matlab First 12 elements of **second** column (months). Remember that indexing starts with 0:
ao[0:12,1]  # first 12 entries of the second column (months); indexing starts at 0
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
First row:
ao[0,:]  # the first row
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
We can create a mask, selecting all rows where the value in the second column (months) equals 10 (October):
mask = (ao[:,1]==10)  # boolean mask: True where the month column equals 10 (October)
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Here we apply this mask and show only the first 5 rows of the array:
ao[mask][:5,:]  # apply the mask, then show the first five selected rows
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You don't have to create a separate variable for the mask; you can apply it directly. Here, instead of the first five rows, I show the last five rows:
ao[ao[:,1]==10][-5:,:]  # mask applied inline: the last five October rows
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You can combine conditions. In this case we select October-December data (only first 10 elements are shown):
ao[(ao[:,1]>=10)&(ao[:,1]<=12)][0:10,:]  # October-December rows; first ten shown
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You can assign values to a subset of values (*this expression fixes the problem with the very small value at 2015-04*)
ao[ao<-10]=0  # zero out the outlier below -10 (the bad 2015-04 value mentioned above)
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Basic operations Create example array from first 12 values of second column and perform some basic operations:
# element-wise arithmetic on the first year of month numbers
months = ao[0:12,1]
months
months + 10
months * 20
months * months
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Basic statistics Create *ao_values* that will contain only the data values:
ao_values = ao[:,2]  # keep only the data values (third column)
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Simple statistics:
# basic summary statistics via ndarray methods
ao_values.min()
ao_values.max()
ao_values.mean()
ao_values.std()
ao_values.sum()
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You can also use *np.sum* function:
np.sum(ao_values)  # same result as ao_values.sum(), using the function form
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
One can make operations on the subsets:
np.mean(ao[ao[:,1]==1,2]) # January monthly mean: mask rows by month, average the value column
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Result will be the same if we use method on our selected data:
ao[ao[:,1]==1,2].mean()  # same January mean via the ndarray method
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Saving data You can save your data as a text file
np.savetxt('ao_only_values.csv',ao[:, 2], fmt='%.4f')  # write values as text, 4 decimals
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Head of resulting file:
!head ao_only_values.csv
-0.0603 0.6268 -0.0081 0.5551 0.0716 0.5386 -0.8025 -0.8510 0.3580 -0.3789
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
You can also save it as binary:
# Save the values column as raw binary.
# BUG FIX: the original opened the file in text mode ('w'); ndarray.tofile
# writes raw bytes and requires binary mode in Python 3. Also use a context
# manager so the handle is closed even on error.
with open('ao_only_values.bin', 'wb') as f:
    ao[:, 2].tofile(f)
_____no_output_____
CC-BY-3.0
03 - NumPy arrays.ipynb
davibortolotti/python_for_geosciences
Creating your own dataset from Google Images*by: Francisco Ingham and Jeremy Howard. Inspired by [Adrian Rosebrock](https://www.pyimagesearch.com/2017/12/04/how-to-create-a-deep-learning-dataset-using-google-images/)* In this tutorial we will see how to easily create an image dataset through Google Images. **Note**: You will have to repeat these steps for any new category you want to Google (e.g once for dogs and once for cats).
%reload_ext autoreload %autoreload 2 %matplotlib inline # You need to mount your google drive to the /content/gdrive folder of your virtual computer # located in the colab server from google.colab import drive drive.mount("/content/gdrive") #drive.mount("/content/gdrive", force_remount=True) from fastai.vision import *
_____no_output_____
Apache-2.0
MidTermPart2.ipynb
moonryul/course-v3
Get a list of URLs Search and scroll Question 1: (1.1) Please download 3 categories of animal images from google. Download about 100 images for each category. Go to [Google Images](http://images.google.com) and search for the images you are interested in. The more specific you are in your Google Search, the better the results and the less manual pruning you will have to do.Scroll down until you've seen all the images you want to download, or until you see a button that says 'Show more results'. All the images you scrolled past are now available to download. To get more, click on the button, and continue scrolling. The maximum number of images Google Images shows is 700.It is a good idea to put things you want to exclude into the search query, for instance if you are searching for the Eurasian wolf, "canis lupus lupus", it might be a good idea to exclude other variants: "canis lupus lupus" -dog -arctos -familiaris -baileyi -occidentalisYou can also limit your results to show only photos by clicking on Tools and selecting Photos from the Type dropdown. Download into file Question 1 (1.2) Move the downloaded files to your google dirve and make the names of the files in the form of *.csv. Now you must run some Javascript code in your browser which will save the URLs of all the images you want for you dataset.In Google Chrome press Ctrl+Shift+j on Windows/Linux and CmdOptj on macOS, and a small window the javascript 'Console' will appear. In Firefox press CtrlShiftk on Windows/Linux or CmdOptk on macOS. That is where you will paste the JavaScript commands.You will need to get the urls of each of the images. Before running the following commands, you may want to disable ad blocking extensions (uBlock, AdBlockPlus etc.) in Chrome. Otherwise the window.open() command doesn't work. 
Then you can run the following commands:```javascripturls=Array.from(document.querySelectorAll('.rg_i')).map(el=> el.hasAttribute('data-src')?el.getAttribute('data-src'):el.getAttribute('data-iurl'));window.open('data:text/csv;charset=utf-8,' + escape(urls.join('\n')));``` upload urls file into /content folder You will need to run this cell once per each category. The following is an illustration.
path = Path('gdrive/My Drive/fastai-v3/data/bears')  # dataset root on the mounted drive
_____no_output_____
Apache-2.0
MidTermPart2.ipynb
moonryul/course-v3
Download images Now you will need to download your images from their respective urls.fast.ai has a function that allows you to do just that. You just have to specify the urls filename as well as the destination folder and this function will download and save all images that can be opened. If they have some problem in being opened, they will not be saved.Let's download our images! Notice you can choose a maximum number of images to be downloaded. In this case we will not download all the urls.You will need to run this line once for every category. The following is an illustration.
classes = ['teddys', 'grizzly', 'black']

# For example, do this when downloading the 'urls_black.csv' file:
folder = 'teddys'
dest = path/folder
file = 'urls_teddy.csv'
download_images(dest/file, dest, max_pics=100)
# Question 2: Explain what happens when you execute download_images().

# drop images that cannot be opened; cap the remaining ones at 500 px
for c in classes:
    print(c)
    verify_images(path/c, delete=True, max_size=500)
teddys
Apache-2.0
MidTermPart2.ipynb
moonryul/course-v3
View data
np.random.seed(42)
# BUG FIX: the original was missing the closing parenthesis after
# imagenet_stats, which is a SyntaxError.
data = ImageDataBunch.from_folder(path, train=".", valid_pct=0.2,
                                  ds_tfms=get_transforms(), size=224,
                                  num_workers=4).normalize(imagenet_stats)
# Question 3: Explain how the categories of the images are extracted when
# you execute the above statement.
_____no_output_____
Apache-2.0
MidTermPart2.ipynb
moonryul/course-v3
Good! Let's take a look at some of our pictures then. Train model
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
# Question 4: 4.1) cnn_learner() has input parameters other than those shown
# above. One of them is `pretrained`, which is True by default when you do
# not specify it. What happens when you set pretrained=False, as in
#   learn = cnn_learner(data, models.resnet34, metrics=error_rate, pretrained=False)

interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
# Question 5: What does your confusion matrix tell you about the prediction
# capability of your neural network? Explain concisely but completely.

# Question 6: Use interp.plot_top_losses() to examine the prediction
# capability of your neural network. Explain concisely but completely.
_____no_output_____
Apache-2.0
MidTermPart2.ipynb
moonryul/course-v3
Python for Finance (2nd ed.)**Mastering Data-Driven Finance**&copy; Dr. Yves J. Hilpisch | The Python Quants GmbH Data Analysis with pandas pandas Basics First Steps with DataFrame Class
import pandas as pd

# Build a small DataFrame and exercise basic indexing/aggregation.
df = pd.DataFrame([10, 20, 30, 40],
                  columns=['numbers'],
                  index=['a', 'b', 'c', 'd'])
df
df.index
df.columns
df.loc['c']
df.loc[['a', 'd']]
df.iloc[1:3]
df.sum()
df.apply(lambda x: x ** 2)
df ** 2

# add columns; 'names' is aligned on the index labels, not on position
df['floats'] = (1.5, 2.5, 3.5, 4.5)
df
df['floats']
df['names'] = pd.DataFrame(['Yves', 'Sandra', 'Lilli', 'Henry'],
                           index=['d', 'a', 'b', 'c'])
df

# MODERNIZED: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat is the supported replacement.
pd.concat([df, pd.DataFrame([{'numbers': 100, 'floats': 5.75,
                              'names': 'Jil'}])], ignore_index=True)
df = pd.concat([df, pd.DataFrame({'numbers': 100, 'floats': 5.75,
                                  'names': 'Jil'}, index=['y'])])
df
df = pd.concat([df, pd.DataFrame({'names': 'Liz'}, index=['z'])],
               sort=False)
df
df.dtypes
df[['numbers', 'floats']].mean()
df[['numbers', 'floats']].std()
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Second Steps with DataFrame Class
import numpy as np

# a reproducible 9x4 array of standard-normal draws
np.random.seed(100)
a = np.random.standard_normal((9, 4))
a
df = pd.DataFrame(a)
df
df.columns = ['No1', 'No2', 'No3', 'No4']
df
df['No2'].mean()
# month-end dates to use as the index
dates = pd.date_range('2019-1-1', periods=9, freq='M')
dates
df.index = dates
df
df.values
np.array(df)
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Basic Analytics
# basic analytics on the frame built in the previous cell
df.info()
df.describe()
df.sum()
df.mean()
df.mean(axis=0)   # column-wise (the default)
df.mean(axis=1)   # row-wise
df.cumsum()
np.mean(df)  # raises warning
np.log(df)
np.sqrt(abs(df))
np.sqrt(abs(df)).sum()
100 * df + 100
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Basic Visualization
from pylab import plt, mpl plt.style.use('seaborn') mpl.rcParams['font.family'] = 'serif' %matplotlib inline df.cumsum().plot(lw=2.0, figsize=(10, 6)); # plt.savefig('../../images/ch05/pd_plot_01.png') df.plot.bar(figsize=(10, 6), rot=30); # df.plot(kind='bar', figsize=(10, 6)) # plt.savefig('../../images/ch05/pd_plot_02.png')
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Series Class
type(df)
S = pd.Series(np.linspace(0, 15, 7), name='series')
S
type(S)
s = df['No1']   # selecting a single column yields a Series
s
type(s)
s.mean()
s.plot(lw=2.0, figsize=(10, 6));
# plt.savefig('../../images/ch05/pd_plot_03.png')
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
GroupBy Operations
# group-by operations on the 9-row frame
df['Quarter'] = ['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2', 'Q3', 'Q3', 'Q3']
df
groups = df.groupby('Quarter')
groups.size()
groups.mean()
groups.max()
groups.aggregate([min, max]).round(2)
df['Odd_Even'] = ['Odd', 'Even', 'Odd', 'Even', 'Odd', 'Even', 'Odd', 'Even', 'Odd']
groups = df.groupby(['Quarter', 'Odd_Even'])   # two-level grouping
groups.size()
groups[['No1', 'No4']].aggregate([sum, np.mean])
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Complex Selection
# boolean selection on a random 10x2 frame
data = np.random.standard_normal((10, 2))
df = pd.DataFrame(data, columns=['x', 'y'])
df.info()
df.head()
df.tail()
df['x'] > 0.5
(df['x'] > 0) & (df['y'] < 0)
(df['x'] > 0) | (df['y'] < 0)
df[df['x'] > 0]
df.query('x > 0')          # same selection via a query string
df[(df['x'] > 0) & (df['y'] < 0)]
df.query('x > 0 & y < 0')
df[(df.x > 0) | (df.y < 0)]
df > 0
df[df > 0]                 # element-wise: non-matching entries become NaN
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Concatenation, Joining and Merging
# two small frames with partially overlapping indices
df1 = pd.DataFrame(['100', '200', '300', '400'],
                   index=['a', 'b', 'c', 'd'],
                   columns=['A'])
df1
df2 = pd.DataFrame(['200', '150', '50'],
                   index=['f', 'b', 'd'],
                   columns=['B'])
df2
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Concatenation
# MODERNIZED: DataFrame.append was deprecated and removed in pandas 2.0;
# pd.concat produces the same stacked results.
pd.concat((df1, df2), sort=False)
pd.concat((df1, df2), ignore_index=True, sort=False)
pd.concat((df1, df2), sort=False)
pd.concat((df1, df2), ignore_index=True, sort=False)
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Joining
# index-based joins between df1 and df2
df1.join(df2)                # left join on the index (default)
df2.join(df1)
df1.join(df2, how='left')
df1.join(df2, how='right')
df1.join(df2, how='inner')
df1.join(df2, how='outer')
df = pd.DataFrame()
df['A'] = df1['A']
df
df['B'] = df2
df
df = pd.DataFrame({'A': df1['A'], 'B': df2['B']})
df
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Merging
# column-based merges on a shared column 'C'
c = pd.Series([250, 150, 50], index=['b', 'd', 'c'])
df1['C'] = c
df2['C'] = c
df1
df2
pd.merge(df1, df2)
pd.merge(df1, df2, on='C')
pd.merge(df1, df2, how='outer')
pd.merge(df1, df2, left_on='A', right_on='B')
pd.merge(df1, df2, left_on='A', right_on='B', how='outer')
pd.merge(df1, df2, left_index=True, right_index=True)
pd.merge(df1, df2, on='C', left_index=True)
pd.merge(df1, df2, on='C', right_index=True)
pd.merge(df1, df2, on='C', left_index=True, right_index=True)
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
Performance Aspects
data = np.random.standard_normal((1000000, 2)) data.nbytes df = pd.DataFrame(data, columns=['x', 'y']) df.info() %time res = df['x'] + df['y'] res[:3] %time res = df.sum(axis=1) res[:3] %time res = df.values.sum(axis=1) res[:3] %time res = np.sum(df, axis=1) res[:3] %time res = np.sum(df.values, axis=1) res[:3] %time res = df.eval('x + y') res[:3] %time res = df.apply(lambda row: row['x'] + row['y'], axis=1) res[:3]
_____no_output_____
CNRI-Python
code/ch05/05_pandas.ipynb
meaninginuse/py4fi2nd
RAC/DVR step 1: diagonalize **H**($\lambda$)
# RAC/DVR step 1: build the DVR representation of T and V for the 3D
# Jolanta model potential, diagonalize H, print the low-lying spectrum,
# and plot the potential with the eigenstate densities superimposed.
import numpy as np
import sys
import matplotlib.pyplot as plt
%matplotlib qt5
import pandas as pd
#
# extend path by location of the dvr package
#
sys.path.append('../../Python_libs')
import dvr
import jolanta

# unit-conversion constants (atomic units <-> amu / cm^-1 / eV / Angstrom)
amu_to_au=1822.888486192
au2cm=219474.63068
au2eV=27.211386027
Angs2Bohr=1.8897259886

#
#  Jolanata-3D parameters a, b, c: (0.028, 1.0, 0.028)
#
#  CS-DVR:
#    bound state: -7.17051 eV
#    resonance (3.1729556 - 0.16085j) eV
#
jparam=(0.028, 1.0, 0.028)

#
#  compute DVR of T and V
#  then show the density of states
#  in a potential + energy-levels plot
#  the standard 3D-Jolanta is used (resonance at 1.75 -0.2i eV)
#
rmin=0
rmax=12      # grid from 0 to rmax
thresh = 8   # maximum energy for plot
ppB = 15     # grid points per Bohr

nGrid=int((rmax-rmin)*ppB)
rs = dvr.DVRGrid(rmin, rmax, nGrid)
Vs = jolanta.Jolanta_3D(rs, jparam)
Ts = dvr.KineticEnergy(1, rmin, rmax, nGrid)
[energy, wf] = dvr.DVRDiag2(nGrid, Ts, Vs, wf=True)

# print eigenvalues up to (and including the first above) the plot threshold
n_ene=0
for i in range(nGrid):
    print("%3d %12.8f au = %12.5f eV" % (i+1, energy[i], energy[i]*au2eV))
    n_ene += 1
    if energy[i]*au2eV > thresh:
        break

# "DVR normalization", sum(wf[:,0]**2)
# this is correct for plotting

c=["orange", "blue"]     # alternating colors for the state densities
#h=float(xmax) / (nGrid+1.0)
scale=3*au2eV            # visual scaling of |psi|^2 in the plot

plt.cla()
plt.plot(rs,Vs*au2eV, '-', color="black")
for i in range(n_ene):
    plt.plot(rs, scale*wf[:,i]**2+energy[i]*au2eV, '-', color=c[i%len(c)])
plt.ylim(-8, 1.5*thresh)
plt.xlabel('$r$ [Bohr]')
plt.ylabel('$E$ [eV]')
plt.show()
1 -0.26351095 au = -7.17050 eV 2 0.11989697 au = 3.26256 eV 3 0.28142119 au = 7.65786 eV 4 0.52212147 au = 14.20765 eV
MIT
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
RAC by increasing $b$. The last energy needs to be about $7E_r \approx 22$ eV.
# # show the potential # a_ref, b_ref, c_ref = jparam plt.cla() for b_curr in [1.1, 1.3, 1.5, 1.7]: param = [a_ref, b_curr, c_ref] plt.plot(rs, jolanta.Jolanta_3D(rs, param)*au2eV) plt.ylim(-30, 10) plt.show() a_ref, b_ref, c_ref = jparam b_min=b_ref b_max=2.5 nEs_keep=4 # how many energies are kept n_b=101 bs=np.linspace(b_min, b_max, num=n_b, endpoint=True) run_data = np.zeros((n_b, nEs_keep+1)) # array used to collect all eta-run data run_data[:,0]=bs for l, b_curr in enumerate(bs): param = [a_ref, b_curr, c_ref] Vs = jolanta.Jolanta_3D(rs, param) energy = dvr.DVRDiag2(nGrid, Ts, Vs) run_data[l,1:] = au2eV*energy[0:nEs_keep] print(l+1, end=" ") if (l+1)%10==0: print() print(run_data[-1,:]) plt.cla() for i in range(0, nEs_keep): plt.plot(bs, run_data[:,i+1], 'o-') plt.ylim(-25,5) plt.show() cols = ['z'] for i in range(nEs_keep): cols.append('E'+str(i+1)) df = pd.DataFrame(run_data, columns=cols) df.to_csv('rac_DVR_3D_b-scale_rmax_12.csv', index=False) df.head(5)
_____no_output_____
MIT
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
RAC with Coulomb potential
# # show the potential # def coulomb(r, lbd=1.0): """ attractive Coulomb potential with strength lbd = lamda """ return -lbd/r plt.cla() for l_curr in [0, 0.5, 1.0, 1.5, 2.0]: plt.plot(rs, (jolanta.Jolanta_3D(rs, jparam)+coulomb(rs, lbd=l_curr))*au2eV) #plt.xlim(0,15) plt.ylim(-30, 10) plt.show() l_min=0.0 l_max=2.6 nEs_keep=4 # how many energies are kept npts=101 ls=np.linspace(l_min, l_max, num=npts, endpoint=True) run_data = np.zeros((npts, nEs_keep+1)) # array used to collect all eta-run data run_data[:,0]=ls VJs = jolanta.Jolanta_3D(rs, jparam) Ws = coulomb(rs, lbd=1.0) for j, l_curr in enumerate(ls): Vs = VJs + l_curr*Ws energy = dvr.DVRDiag2(nGrid, Ts, Vs) run_data[j,1:] = au2eV*energy[0:nEs_keep] print(j+1, end=" ") if (j+1)%10==0: print() print(run_data[-1,:]) plt.cla() for i in range(0, nEs_keep): plt.plot(ls, run_data[:,i+1], 'o-') plt.ylim(-25,5) plt.show() cols = ['z'] for i in range(nEs_keep): cols.append('E'+str(i+1)) df = pd.DataFrame(run_data, columns=cols) df.to_csv('rac_DVR_3D_coulomb_rmax_12.csv', index=False) df.head(5)
_____no_output_____
MIT
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
RAC with soft-box
# # show the box potential # def softbox(r, rcut=1.0, lbd=1.0): """ Softbox: -1 at the origin, rises at r0 softly to asymptotic 0 based on Gaussian with inverted scale """ return lbd*(np.exp(-(2*rcut)**2/r**2) - 1) plt.cla() for l_curr in [0.1, 0.2, 0.3, 0.4, 0.5]: Vs = jolanta.Jolanta_3D(rs, jparam) Ws = softbox(rs, rcut=5.0, lbd=l_curr) plt.plot(rs, Ws*au2eV) plt.xlim(0,20) plt.ylim(-15, 0) plt.show() # # show the full potential # plt.cla() for l_curr in [0.1, 0.2, 0.3, 0.4, 0.5]: Vs = jolanta.Jolanta_3D(rs, jparam) Ws = softbox(rs, rcut=3.0, lbd=l_curr) plt.plot(rs, (Vs+Ws)*au2eV) #plt.xlim(0,20) plt.ylim(-30, 8) plt.show() l_min=0.0 l_max=1.2 nEs_keep=4 # how many energies are kept npts=101 ls=np.linspace(l_min, l_max, num=npts, endpoint=True) run_data = np.zeros((npts, nEs_keep+1)) # array used to collect all eta-run data run_data[:,0]=ls VJs = jolanta.Jolanta_3D(rs, jparam) Ws = softbox(rs, rcut=3.0, lbd=1.0) for j, l_curr in enumerate(ls): Vs = VJs + l_curr*Ws energy = dvr.DVRDiag2(nGrid, Ts, Vs) run_data[j,1:] = au2eV*energy[0:nEs_keep] print(j+1, end=" ") if (j+1)%10==0: print() print(run_data[-1,:]) plt.cla() for i in range(0, nEs_keep): plt.plot(ls, run_data[:,i+1], 'o-') plt.ylim(-25,5) plt.show() cols = ['z'] for i in range(nEs_keep): cols.append('E'+str(i+1)) df = pd.DataFrame(run_data, columns=cols) df.to_csv('rac_DVR_3D_softbox_rmax_12.csv', index=False) df.head(5)
_____no_output_____
MIT
notebooks/RAC_3D/.ipynb_checkpoints/RAC-DVR-J3D-checkpoint.ipynb
tsommerfeld/L2-methods_for_resonances
Deep Learning=============Assignment 4------------Previously in `2_fullyconnected.ipynb` and `3_regularization.ipynb`, we trained fully connected networks to classify [notMNIST](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html) characters. The goal of this assignment is to make the neural network convolutional.
# These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import time import numpy as np import tensorflow as tf from six.moves import cPickle as pickle from six.moves import range pickle_file = 'notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape)
Training set (200000, 28, 28) (200000,) Validation set (10000, 28, 28) (10000,) Test set (10000, 28, 28) (10000,)
Apache-2.0
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
Reformat into a TensorFlow-friendly shape:- convolutions need the image data formatted as a cube (width by height by channels)- labels as float 1-hot encodings.
image_size = 28
num_labels = 10
num_channels = 1  # grayscale

import numpy as np

def reformat(dataset, labels):
    """Reshape images to (N, 28, 28, 1) float32 cubes and one-hot encode labels."""
    dataset = dataset.reshape(
        (-1, image_size, image_size, num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    return dataset, labels

train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)

def accuracy(predictions, labels):
    """Percentage of rows whose argmax prediction matches the one-hot label."""
    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
            / predictions.shape[0])
_____no_output_____
Apache-2.0
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
Let's build a small network with two convolutional layers, followed by one fully connected layer. Convolutional networks are more expensive computationally, so we'll limit its depth and number of fully connected nodes.
# Baseline convnet: two stride-2 5x5 convolutions (each quarters the
# spatial area, hence the image_size // 4 in layer3), one fully connected
# hidden layer, and a linear output layer.
# NOTE(review): legacy TensorFlow 1.x graph API (tf.placeholder,
# tf.Session, initialize_all_variables) -- runs only on TF 1.x.
batch_size = 16
patch_size = 5
depth = 16
num_hidden = 64

graph = tf.Graph()

with graph.as_default():

    # Input data.
    tf_train_dataset = tf.placeholder(
        tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    layer1_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, num_channels, depth], stddev=0.1))
    layer1_biases = tf.Variable(tf.zeros([depth]))
    layer2_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth, depth], stddev=0.1))
    layer2_biases = tf.Variable(tf.constant(1.0, shape=[depth]))
    layer3_weights = tf.Variable(tf.truncated_normal(
        [image_size // 4 * image_size // 4 * depth, num_hidden], stddev=0.1))
    layer3_biases = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
    layer4_weights = tf.Variable(tf.truncated_normal(
        [num_hidden, num_labels], stddev=0.1))
    layer4_biases = tf.Variable(tf.constant(1.0, shape=[num_labels]))

    # Model.
    def model(data):
        conv = tf.nn.conv2d(data, layer1_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer1_biases)
        conv = tf.nn.conv2d(hidden, layer2_weights, [1, 2, 2, 1], padding='SAME')
        hidden = tf.nn.relu(conv + layer2_biases)
        # flatten the feature cube for the fully connected layers
        shape = hidden.get_shape().as_list()
        reshape = tf.reshape(hidden, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        return tf.matmul(hidden, layer4_weights) + layer4_biases

    # Training computation.
    logits = model(tf_train_dataset)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.05).minimize(loss)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))

num_steps = 1001

# Minibatch SGD: cycle through the training set, reporting loss and
# minibatch/validation accuracy every 50 steps.
with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print('Initialized')
    for step in range(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 50 == 0):
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
            print('Validation accuracy: %.1f%%' % accuracy(
                valid_prediction.eval(), valid_labels))
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
Initialized Minibatch loss at step 0 : 3.51275 Minibatch accuracy: 6.2% Validation accuracy: 12.8% Minibatch loss at step 50 : 1.48703 Minibatch accuracy: 43.8% Validation accuracy: 50.4% Minibatch loss at step 100 : 1.04377 Minibatch accuracy: 68.8% Validation accuracy: 67.4% Minibatch loss at step 150 : 0.601682 Minibatch accuracy: 68.8% Validation accuracy: 73.0% Minibatch loss at step 200 : 0.898649 Minibatch accuracy: 75.0% Validation accuracy: 77.8% Minibatch loss at step 250 : 1.3637 Minibatch accuracy: 56.2% Validation accuracy: 75.4% Minibatch loss at step 300 : 1.41968 Minibatch accuracy: 62.5% Validation accuracy: 76.0% Minibatch loss at step 350 : 0.300648 Minibatch accuracy: 81.2% Validation accuracy: 80.2% Minibatch loss at step 400 : 1.32092 Minibatch accuracy: 56.2% Validation accuracy: 80.4% Minibatch loss at step 450 : 0.556701 Minibatch accuracy: 81.2% Validation accuracy: 79.4% Minibatch loss at step 500 : 1.65595 Minibatch accuracy: 43.8% Validation accuracy: 79.6% Minibatch loss at step 550 : 1.06995 Minibatch accuracy: 75.0% Validation accuracy: 81.2% Minibatch loss at step 600 : 0.223684 Minibatch accuracy: 100.0% Validation accuracy: 82.3% Minibatch loss at step 650 : 0.619602 Minibatch accuracy: 87.5% Validation accuracy: 81.8% Minibatch loss at step 700 : 0.812091 Minibatch accuracy: 75.0% Validation accuracy: 82.4% Minibatch loss at step 750 : 0.276302 Minibatch accuracy: 87.5% Validation accuracy: 82.3% Minibatch loss at step 800 : 0.450241 Minibatch accuracy: 81.2% Validation accuracy: 82.3% Minibatch loss at step 850 : 0.137139 Minibatch accuracy: 93.8% Validation accuracy: 82.3% Minibatch loss at step 900 : 0.52664 Minibatch accuracy: 75.0% Validation accuracy: 82.2% Minibatch loss at step 950 : 0.623835 Minibatch accuracy: 87.5% Validation accuracy: 82.1% Minibatch loss at step 1000 : 0.243114 Minibatch accuracy: 93.8% Validation accuracy: 82.9% Test accuracy: 90.0%
Apache-2.0
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
---Problem 1---------The convolutional model above uses convolutions with stride 2 to reduce the dimensionality. Replace the strides by a max pooling operation (`nn.max_pool()`) of stride 2 and kernel size 2.---
# TODO
_____no_output_____
Apache-2.0
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
---Problem 2---------Try to get the best performance you can using a convolutional net. Look for example at the classic [LeNet5](http://yann.lecun.com/exdb/lenet/) architecture, adding Dropout, and/or adding learning rate decay.---
# Problem 2: deeper convnet with 3x3 convolutions, max pooling, ELU
# activations, dropout on the fully connected layers, and exponential
# learning-rate decay.
# NOTE(review): this cell is Python 2 code -- it uses xrange, and
# image_size / 7 in the layer3 shape relies on integer division; under
# Python 3 this would need // (and range).  TODO confirm before porting.
batch_size = 16
patch_size = 3
depth = 16
num_hidden = 705
num_hidden_last = 205

graph = tf.Graph()

with graph.as_default():

    # Input data.
    tf_train_dataset = tf.placeholder(
        tf.float32, shape=(batch_size, image_size, image_size, num_channels))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)

    # Variables.
    layerconv1_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, num_channels, depth], stddev=0.1))
    layerconv1_biases = tf.Variable(tf.zeros([depth]))
    layerconv2_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth, depth * 2], stddev=0.1))
    layerconv2_biases = tf.Variable(tf.zeros([depth * 2]))
    layerconv3_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth * 2, depth * 4], stddev=0.03))
    layerconv3_biases = tf.Variable(tf.zeros([depth * 4]))
    layerconv4_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth * 4, depth * 4], stddev=0.03))
    layerconv4_biases = tf.Variable(tf.zeros([depth * 4]))
    layerconv5_weights = tf.Variable(tf.truncated_normal(
        [patch_size, patch_size, depth * 4, depth * 16], stddev=0.03))
    layerconv5_biases = tf.Variable(tf.zeros([depth * 16]))
    layer3_weights = tf.Variable(tf.truncated_normal(
        [image_size / 7 * image_size / 7 * (depth * 4), num_hidden], stddev=0.03))
    layer3_biases = tf.Variable(tf.zeros([num_hidden]))
    layer4_weights = tf.Variable(tf.truncated_normal(
        [num_hidden, num_hidden_last], stddev=0.0532))
    layer4_biases = tf.Variable(tf.zeros([num_hidden_last]))
    layer5_weights = tf.Variable(tf.truncated_normal(
        [num_hidden_last, num_labels], stddev=0.1))
    layer5_biases = tf.Variable(tf.zeros([num_labels]))

    # Model.
    def model(data, use_dropout=False):
        conv = tf.nn.conv2d(data, layerconv1_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.elu(conv + layerconv1_biases)
        pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        conv = tf.nn.conv2d(pool, layerconv2_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.elu(conv + layerconv2_biases)
        #pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        conv = tf.nn.conv2d(hidden, layerconv3_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.elu(conv + layerconv3_biases)
        pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        # norm1
        # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        conv = tf.nn.conv2d(pool, layerconv4_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.elu(conv + layerconv4_biases)
        pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        conv = tf.nn.conv2d(pool, layerconv5_weights, [1, 1, 1, 1], padding='SAME')
        hidden = tf.nn.elu(conv + layerconv5_biases)
        pool = tf.nn.max_pool(hidden, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
        # norm1 = tf.nn.lrn(pool, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        # flatten and run the fully connected head (dropout only in training)
        shape = pool.get_shape().as_list()
        #print(shape)
        reshape = tf.reshape(pool, [shape[0], shape[1] * shape[2] * shape[3]])
        hidden = tf.nn.elu(tf.matmul(reshape, layer3_weights) + layer3_biases)
        if use_dropout:
            hidden = tf.nn.dropout(hidden, 0.75)
        nn_hidden_layer = tf.matmul(hidden, layer4_weights) + layer4_biases
        hidden = tf.nn.elu(nn_hidden_layer)
        if use_dropout:
            hidden = tf.nn.dropout(hidden, 0.75)
        return tf.matmul(hidden, layer5_weights) + layer5_biases

    # Training computation (dropout enabled for the training graph only).
    logits = model(tf_train_dataset, True)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))

    global_step = tf.Variable(0)  # count the number of steps taken.
    learning_rate = tf.train.exponential_decay(0.1, global_step, 3000, 0.86, staircase=True)

    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Predictions for the training, validation, and test data.
    train_prediction = tf.nn.softmax(logits)
    valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(model(tf_test_dataset))

num_steps = 45001

with tf.Session(graph=graph) as session:
    tf.initialize_all_variables().run()
    print("Initialized")
    for step in xrange(num_steps):
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
            [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 500 == 0):
            print("Minibatch loss at step", step, ":", l)
            print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
            print("Validation accuracy: %.1f%%" % accuracy(
                valid_prediction.eval(), valid_labels))
            print(time.ctime())
    print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
Initialized Minibatch loss at step 0 : 2.30135 Minibatch accuracy: 6.2% Validation accuracy: 11.4% Thu Jul 14 19:41:34 2016 Minibatch loss at step 500 : 0.77839 Minibatch accuracy: 87.5% Validation accuracy: 85.0% Thu Jul 14 19:42:07 2016 Minibatch loss at step 1000 : 0.239152 Minibatch accuracy: 93.8% Validation accuracy: 86.5% Thu Jul 14 19:42:50 2016 Minibatch loss at step 1500 : 0.642659 Minibatch accuracy: 81.2% Validation accuracy: 86.7% Thu Jul 14 19:43:30 2016 Minibatch loss at step 2000 : 0.194781 Minibatch accuracy: 87.5% Validation accuracy: 87.4% Thu Jul 14 19:44:08 2016 Minibatch loss at step 2500 : 1.07727 Minibatch accuracy: 62.5% Validation accuracy: 87.4% Thu Jul 14 19:44:53 2016 Minibatch loss at step 3000 : 0.656757 Minibatch accuracy: 87.5% Validation accuracy: 88.3% Thu Jul 14 19:45:32 2016 Minibatch loss at step 3500 : 0.417028 Minibatch accuracy: 87.5% Validation accuracy: 88.4% Thu Jul 14 19:46:10 2016 Minibatch loss at step 4000 : 0.498826 Minibatch accuracy: 81.2% Validation accuracy: 89.3% Thu Jul 14 19:46:51 2016 Minibatch loss at step 4500 : 0.501579 Minibatch accuracy: 87.5% Validation accuracy: 89.3% Thu Jul 14 19:47:36 2016 Minibatch loss at step 5000 : 0.852857 Minibatch accuracy: 75.0% Validation accuracy: 88.5% Thu Jul 14 19:48:23 2016 Minibatch loss at step 5500 : 0.468938 Minibatch accuracy: 87.5% Validation accuracy: 88.9% Thu Jul 14 19:49:09 2016
Apache-2.0
previous_training/udacity/4_convolutions.ipynb
archelogos/smart-live-camera
STEP 4 - Making DRL PySC2 Agent
# Notebook bootstrap: enable autoreload so edited modules are re-imported,
# extend sys.path to the project root, and monkey-patch sys.argv because
# Abseil (used by PySC2) parses it as if this were a command-line app.
%load_ext autoreload
%autoreload 2

import sys; sys.path.append('..')

### unfortunately, PySC2 uses Abseil, which treats python code as if its run like an app
# This does not play well with jupyter notebook
# So we will need to monkeypatch sys.argv
import sys
#sys.argv = ["python", "--map", "AbyssalReef"]
sys.argv = ["python", "--map", "Simple64"]
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
0. Running 'Agent code' on Jupyter notebook
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run an agent."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import importlib
import threading

from absl import app
from absl import flags
from future.builtins import range  # pylint: disable=redefined-builtin

from pysc2 import maps
from pysc2.env import available_actions_printer
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.lib import point_flag
from pysc2.lib import stopwatch
from pysc2.lib import actions

FLAGS = flags.FLAGS

# because of Abseil's horrible design for running code underneath Colabs
# We have to pull out this ugly hack from the hat
# (the guard lets this cell be re-run without "duplicate flag" errors)
if "flags_defined" not in globals():
    flags.DEFINE_bool("render", False, "Whether to render with pygame.")
    point_flag.DEFINE_point("feature_screen_size", "84",
                            "Resolution for screen feature layers.")
    point_flag.DEFINE_point("feature_minimap_size", "64",
                            "Resolution for minimap feature layers.")
    point_flag.DEFINE_point("rgb_screen_size", None,
                            "Resolution for rendered screen.")
    point_flag.DEFINE_point("rgb_minimap_size", None,
                            "Resolution for rendered minimap.")
    flags.DEFINE_enum("action_space", "RAW", sc2_env.ActionSpace._member_names_,  # pylint: disable=protected-access
                      "Which action space to use. Needed if you take both feature "
                      "and rgb observations.")
    flags.DEFINE_bool("use_feature_units", False,
                      "Whether to include feature units.")
    flags.DEFINE_bool("use_raw_units", True,
                      "Whether to include raw units.")
    flags.DEFINE_integer("raw_resolution", 64, "Raw Resolution.")
    flags.DEFINE_bool("disable_fog", True, "Whether to disable Fog of War.")
    flags.DEFINE_integer("max_agent_steps", 0, "Total agent steps.")
    flags.DEFINE_integer("game_steps_per_episode", None, "Game steps per episode.")
    flags.DEFINE_integer("max_episodes", 0, "Total episodes.")
    flags.DEFINE_integer("step_mul", 8, "Game steps per agent step.")
    flags.DEFINE_float("fps", 22.4, "Frames per second to run the game.")
    #flags.DEFINE_string("agent", "sc2.agent.BasicAgent.ZergBasicAgent",
    #                    "Which agent to run, as a python path to an Agent class.")
    #flags.DEFINE_enum("agent_race", "zerg", sc2_env.Race._member_names_,  # pylint: disable=protected-access
    #                  "Agent 1's race.")
    flags.DEFINE_string("agent", "TerranRLAgentWithRawActsAndRawObs",
                        "Which agent to run, as a python path to an Agent class.")
    flags.DEFINE_enum("agent_race", "terran", sc2_env.Race._member_names_,  # pylint: disable=protected-access
                      "Agent 1's race.")
    flags.DEFINE_string("agent2", "Bot", "Second agent, either Bot or agent class.")
    flags.DEFINE_enum("agent2_race", "random", sc2_env.Race._member_names_,  # pylint: disable=protected-access
                      "Agent 2's race.")
    flags.DEFINE_enum("difficulty", "hard", sc2_env.Difficulty._member_names_,  # pylint: disable=protected-access
                      "If agent2 is a built-in Bot, it's strength.")
    flags.DEFINE_bool("profile", False, "Whether to turn on code profiling.")
    flags.DEFINE_bool("trace", False, "Whether to trace the code execution.")
    flags.DEFINE_integer("parallel", 1, "How many instances to run in parallel.")
    flags.DEFINE_bool("save_replay", True, "Whether to save a replay at the end.")
    flags.DEFINE_string("map", None, "Name of a map to use.")
    flags.mark_flag_as_required("map")
    flags_defined = True


def run_thread(agent_classes, players, map_name, visualize):
    """Run one thread worth of the environment with agents."""
    with sc2_env.SC2Env(
        map_name=map_name,
        players=players,
        agent_interface_format=sc2_env.parse_agent_interface_format(
            feature_screen=FLAGS.feature_screen_size,
            feature_minimap=FLAGS.feature_minimap_size,
            rgb_screen=FLAGS.rgb_screen_size,
            rgb_minimap=FLAGS.rgb_minimap_size,
            action_space=FLAGS.action_space,
            use_raw_units=FLAGS.use_raw_units,
            raw_resolution=FLAGS.raw_resolution),
        step_mul=FLAGS.step_mul,
        game_steps_per_episode=FLAGS.game_steps_per_episode,
        disable_fog=FLAGS.disable_fog,
        visualize=visualize) as env:
        #env = available_actions_printer.AvailableActionsPrinter(env)
        agents = [agent_cls() for agent_cls in agent_classes]
        run_loop.run_loop(agents, env, FLAGS.max_agent_steps, FLAGS.max_episodes)
        if FLAGS.save_replay:
            env.save_replay(agent_classes[0].__name__)


def main(unused_argv):
    """Run an agent."""
    # NOTE(review): TerranRLAgentWithRawActsAndRawObs / TerranRandomAgent
    # are expected to be defined by later notebook cells -- verify they
    # exist before calling app.run(main).
    #stopwatch.sw.enabled = FLAGS.profile or FLAGS.trace
    #stopwatch.sw.trace = FLAGS.trace

    map_inst = maps.get(FLAGS.map)

    agent_classes = []
    players = []

    #agent_module, agent_name = FLAGS.agent.rsplit(".", 1)
    #agent_cls = getattr(importlib.import_module(agent_module), agent_name)
    #agent_classes.append(agent_cls)
    agent_classes.append(TerranRLAgentWithRawActsAndRawObs)
    players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent_race]))

    if map_inst.players >= 2:
        if FLAGS.agent2 == "Bot":
            players.append(sc2_env.Bot(sc2_env.Race[FLAGS.agent2_race],
                                       sc2_env.Difficulty[FLAGS.difficulty]))
        else:
            #agent_module, agent_name = FLAGS.agent2.rsplit(".", 1)
            #agent_cls = getattr(importlib.import_module(agent_module), agent_name)
            agent_classes.append(TerranRandomAgent)
            players.append(sc2_env.Agent(sc2_env.Race[FLAGS.agent2_race]))

    # run parallel - 1 extra environments in background threads; the last
    # one runs in this thread (optionally rendered)
    threads = []
    for _ in range(FLAGS.parallel - 1):
        t = threading.Thread(target=run_thread,
                             args=(agent_classes, players, FLAGS.map, False))
        threads.append(t)
        t.start()

    run_thread(agent_classes, players, FLAGS.map, FLAGS.render)

    for t in threads:
        t.join()

    if FLAGS.profile:
        pass
        #print(stopwatch.sw)
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
1. Creating a PySC2 Agent with Raw Actions & Observations![StarCraft2 PySC2 interfaces](./images/StarCraft2_PySC2_interfaces.png)ref : https://on-demand.gputechconf.com/gtc/2018/presentation/s8739-machine-learning-with-starcraft-II.pdf 1st, Rendered* Decomposed : - Screen, minimap, resources, available actions* Same control as humans : - Pixel coordinates - Move camera - Select unit/rectangle* Great for Deep Learning, but hard 2nd, Feature Layer* Same actions : still in pixel space* Same decomposed observations, but more abstract - Orthogonal camera * Layers: - unit type - unit owner - selection - health - unit density - etc 3rd, Raw* List of units and state* Control each unit individually in world coordinates* Gives all observable state (no camera)* Great for scripted agents and programmatic replay analysis * Raw Actions & Observations 은 world cordinates를 사용하므로 전체 Map을 한번에 관찰하고 Camera를 이동하지 않고도 Map 상의 어느 곳에서도 Action을 취할 수 있는 새로운 형태의 Feature 이다.* 이번 과정에 SL(Supervised Learning, 지도학습)을 활용한 학습은 없지만 스타크래프트 2 리플레이를 활용한 SL은 Raw Actions & Observations를 활용한 "programmatic replay analysis"가 필요하다.* 인간 플레이어를 이긴 DeepMind의 AlphaStar의 주요 변경사항 중의 하나는 Raw Actions & Observations 의 활용이다. DRL 모델의 성능 추이를 보기위해 Reward의 평균 추이를 이용한다. 이때 단순이동평균 보다는 지수이동평균이 적절하다. 지수이동평균(EMA:Exponential Moving Average) 란?지수이동평균(Exponential Moving Average)은 과거의 모든 기간을 계산대상으로 하며 최근의 데이타에 더 높은 가중치를 두는 일종의 가중이동평균법이다.단순이동평균의 계산법에 비하여 원리가 복잡해 보이지만 실제로 이동평균을 산출하는 방법은 Previous Step의 지수이동평균값과 평활계수(smoothing constant) 그리고 당일의 가격만으로 구할 수 있으므로 Previous Step의 지수이동평균값만 구해진다면 오히려 간단한 편이다.따라서 지수이동평균은 단순이동평균에 비해 몇가지 중요한 강점을 가진다.첫째는 가장 최근의 Step에 가장 큰 가중치를 둠으로 해서 최근의 Episode들을 잘 반영한다는 점이고, 둘째는 단순이동평균에서와 같이 오래된 데이타를 갑자기 제외하지 않고 천천히 그 영향력을 사라지게 한다는 점이다.또한 전 기간의 데이타를 분석대상으로 함으로써 가중이동평균에서 문제되는 특정 기간의 데이타만을 분석대상으로 한다는 단점도 보완하고 있다. 지수이동평균(EMA:Exponential Moving Average) 계산지수이동평균은 가장 최근의 값에 많은 가중치를 부여하고 오래 된 값에는 적은 가중치를 부여한다. 비록 오래 된 값이라고 할지라도 완전히 무시하지는 않고 적게나마 반영시켜 계산한다는 장점이 있다. 
단기 변동성을 포착하려는 것이 목적이다.EMA=Previous Step 지수이동평균+(k∗(Current Step Reward − Previous Step 지수이동평균)) 3. Applying Vanilla DQN to a PySC2 Agent구현된 기능- Implementing 'Experience Replay' : - 'Maximization Bias' 문제를 발생시키는 원인 중 하나인 'Sample간의 시간적 연관성'을 해결하기 위한 방법 - Online Learning 에서 Batch Learning 으로 학습방법 바뀜 : Online update 는 Batch update 보다 일반적으로 Validation loss 가 더 높게 나타남. - Reinforcement Learning for Robots. Using Neural Networks. Long -Ji Lin. January 6, 1993. 논문에서 최초로 연구됨 http://isl.anthropomatik.kit.edu/pdf/Lin1993.pdf- Implementing 'Fixed Q-Target' : - 'Moving Q-Target' 문제 해결하기 위한 방법 - 2015년 Nature 버전 DQN 논문에서 처음 제안됨. https://deepmind.com/research/publications/human-level-control-through-deep-reinforcement-learning 구현되지 않은 기능- Implementing 'Sensory Input Feature-Extraction' : - 게임의 Raw Image 를 Neural Net에 넣기 위한 Preprocessing(전처리) 과정 - Raw Image 의 Sequence중 '최근 4개의 이미지'(과거 정보)를 하나의 새로운 State로 정의하여 non-MDP를 MDP 문제로 바꾸는 Preprocessing 과정 - CNN(합성곱 신경망)을 활용한 '차원의 저주' 극복
# Vanilla-DQN agent setup: imports, checkpoint/score file names, episode
# score bookkeeping, compute device, and a TensorBoard writer.
import random
import time
import math
import os.path
import numpy as np
import pandas as pd
from collections import deque
import pickle

from pysc2.agents import base_agent
from pysc2.env import sc2_env
from pysc2.lib import actions, features, units, upgrades

from absl import app

import torch
from torch.utils.tensorboard import SummaryWriter

from skdrl.pytorch.model.mlp import NaiveMultiLayerPerceptron
from skdrl.common.memory.memory import ExperienceReplayMemory

# file-name stems for the persisted Q-network / target network / scores
DATA_FILE_QNET = 'rlagent_with_vanilla_dqn_qnet'
DATA_FILE_QNET_TARGET = 'rlagent_with_vanilla_dqn_qnet_target'
SCORE_FILE = 'rlagent_with_vanilla_dqn_score'

scores = []                        # list containing scores from each episode
scores_window = deque(maxlen=100)  # last 100 scores

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
writer = SummaryWriter()

import torch
import torch.nn as nn

# NOTE(review): this local class definition shadows the
# NaiveMultiLayerPerceptron imported from skdrl above.
class NaiveMultiLayerPerceptron(nn.Module):
    """Plain feed-forward MLP: Linear layers with a configurable hidden
    activation and output activation ('Identity' = linear output)."""

    # NOTE(review): the mutable default num_neurons=[64, 32] is shared
    # across calls; it is only read here, but a None sentinel would be safer.
    def __init__(self, input_dim: int, output_dim: int, num_neurons: list = [64, 32], hidden_act_func: str = 'ReLU', out_act_func: str = 'Identity'):
        super(NaiveMultiLayerPerceptron, self).__init__()

        self.input_dim = input_dim
        self.output_dim = output_dim
        self.num_neurons = num_neurons
        # activation classes are looked up by name on torch.nn
        self.hidden_act_func = getattr(nn, hidden_act_func)()
        self.out_act_func = getattr(nn, out_act_func)()

        # layer i maps input_dims[i] -> output_dims[i]
        input_dims = [input_dim] + num_neurons
        output_dims = num_neurons + [output_dim]

        self.layers = nn.ModuleList()
        for i, (in_dim, out_dim) in enumerate(zip(input_dims, output_dims)):
            is_last = True if i == len(input_dims) - 1 else False
            self.layers.append(nn.Linear(in_dim, out_dim))
            if is_last:
                self.layers.append(self.out_act_func)
            else:
                self.layers.append(self.hidden_act_func)

    def forward(self, xs):
        """Apply every layer (Linear + activation pairs) in order."""
        for layer in self.layers:
            xs = layer(xs)
        return xs

if __name__ == '__main__':
    # quick smoke test of the MLP
    net = NaiveMultiLayerPerceptron(10, 1, [20, 12], 'ReLU', 'Identity')
    print(net)

    xs = torch.randn(size=(12, 10))
    ys = net(xs)
    print(ys)
NaiveMultiLayerPerceptron( (hidden_act_func): ReLU() (out_act_func): Identity() (layers): ModuleList( (0): Linear(in_features=10, out_features=20, bias=True) (1): ReLU() (2): Linear(in_features=20, out_features=12, bias=True) (3): ReLU() (4): Linear(in_features=12, out_features=1, bias=True) (5): Identity() ) ) tensor([[-0.3301], [-0.4388], [-0.4118], [-0.3924], [-0.3717], [-0.4231], [-0.4617], [-0.3912], [-0.3118], [-0.3619], [-0.3262], [-0.4291]], grad_fn=<AddmmBackward>)
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
Q-update 공식 1. Online Q-learning![Online Q-learning](./images/q-update-experience-replay.png) 2. Online Q-learning with Function Approximation![Online Q-learning with Function Approximation](./images/q-update-function-approximation.png) 3. Batch Q-learning with Function Approximation & Experience Replay![Batch Q-learning with Function Approximation & Experience Replay](./images/q-update-online.png)
from random import sample


class ExperienceReplayMemory:
    """Fixed-capacity ring buffer with O(1) insertion and uniform random
    sampling of stored experiences.

    A deque (as used for an episodic memory) is a poor fit for random
    access, so a pre-allocated fixed-size list serves as circular storage.
    """

    def __init__(self, max_size):
        self.buffer = [None] * max_size  # circular storage, pre-allocated
        self.max_size = max_size
        self.index = 0                   # next slot to write
        self.size = 0                    # number of valid entries

    def push(self, obj):
        """Store one experience, overwriting the oldest once full."""
        self.buffer[self.index] = obj
        if self.size < self.max_size:
            self.size += 1
        self.index = (self.index + 1) % self.max_size

    def sample(self, batch_size):
        """Return `batch_size` distinct experiences chosen uniformly."""
        chosen = sample(range(self.size), batch_size)
        return [self.buffer[i] for i in chosen]

    def __len__(self):
        return self.size
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
Moving target problem 1. Function Approximation을 사용하지 않는 Q-learning 의 경우 : 특정한 Q(s,a) update가 다른 Q(s,a)에 영향을 주지 않는다.![Moving target Q-learning](./images/moving-target_q-learing_case.png) 2. Function Approximation을 사용하는 Q-learning 의 경우 : 특정한 Q(s,a) update가 다른 Q(s,a)에 영향을 준다.![Moving target Q-learning with Function Approximation](./images/moving-target_q-learing_with_function_approximation_case.png) Moving target 문제는 Deep Neural Network를 사용하는 Function Approximation 기법인 경우 심해지는 경향성이 있음.image ref : Fast Campus RL online course `nn.SmoothL1Loss()` = Huber loss 란?Mean-squared Error (MSE) Loss 는 데이터의 outlier에 매우 취약하다.어떤 이유로 타겟하는 레이블 y (이 경우는 q-learning target)이 noisy 할때를 가정하면, 잘못된 y 값을 맞추기 위해 파라미터들이 너무 sensitive 하게 움직이게 된다.이런 현상은 q-learning 의 학습초기에 매우 빈번해 나타난다. 이러한 문제를 조금이라도 완화하기 위해서 outlier에 덜 민감한 Huber loss 함수를 사용한다. SmoothL1Loss (aka Huber loss)$$loss(x,y) = \frac{1}{n}\sum_i z_i$$$|x_i - y_i| <1$ 일때,$$z_i = 0.5(x_i - y_i)^2$$$|x_i - y_i| \geq1$ 일때,$$z_i = |x_i - y_i|-0.5$$ref : https://pytorch.org/docs/master/generated/torch.nn.SmoothL1Loss.html
import torch
import torch.nn as nn
import numpy as np
import random


class DQN(nn.Module):
    """Vanilla DQN learner: an online Q-network, a frozen target Q-network,
    and an epsilon-greedy behaviour policy."""

    def __init__(self,
                 state_dim: int,
                 action_dim: int,
                 qnet: nn.Module,
                 qnet_target: nn.Module,
                 lr: float,
                 gamma: float,
                 epsilon: float):
        """
        :param state_dim: input state dimension
        :param action_dim: action dimension
        :param qnet: main q network
        :param qnet_target: target q network
        :param lr: learning rate
        :param gamma: discount factor of MDP
        :param epsilon: E-greedy factor
        """
        super(DQN, self).__init__()
        self.state_dim = state_dim
        self.action_dim = action_dim
        self.qnet = qnet
        self.lr = lr
        self.gamma = gamma
        self.opt = torch.optim.Adam(params=self.qnet.parameters(), lr=lr)
        # Buffer (not a parameter): follows .to(device) and state_dict but is
        # never optimized.
        self.register_buffer('epsilon', torch.ones(1) * epsilon)

        # target network related: start as an exact copy of the online net.
        qnet_target.load_state_dict(qnet.state_dict())
        self.qnet_target = qnet_target
        # Huber loss: less sensitive to outliers in the TD target than MSE.
        self.criteria = nn.SmoothL1Loss()

    def choose_action(self, state):
        """Epsilon-greedy action selection for a single state."""
        qs = self.qnet(state)
        if random.random() <= self.epsilon:  # explore
            action = np.random.choice(range(self.action_dim))
        else:  # exploit: greedy w.r.t. current Q estimates
            action = qs.argmax(dim=-1)
        return int(action)

    def learn(self, state, action, reward, next_state, done):
        """One gradient step on the Huber loss between Q(s, a) and the
        bootstrapped target r + gamma * max_a' Q_target(s', a')."""
        s, a, r, ns = state, action, reward, next_state

        # compute Q-Learning target with 'target network'.
        # BUGFIX: torch.Tensor.max takes `keepdim`; the original passed the
        # numpy-style `keepdims`, which older torch versions reject.
        with torch.no_grad():
            q_max, _ = self.qnet_target(ns).max(dim=-1, keepdim=True)
            # (1 - done) zeroes the bootstrap term for terminal transitions.
            q_target = r + self.gamma * q_max * (1 - done)

        q_val = self.qnet(s).gather(1, a)
        loss = self.criteria(q_val, q_target)

        self.opt.zero_grad()
        loss.backward()
        self.opt.step()


def prepare_training_inputs(sampled_exps, device='cpu'):
    """Collate sampled (s, a, r, s', done) tuples — each tensor having a
    leading batch dimension of 1 — into batched tensors on `device`.

    :param sampled_exps: iterable of 5-tuples of tensors
    :param device: target device for the batched tensors
    :returns: (states, actions, rewards, next_states, dones)
    """
    states = []
    actions = []
    rewards = []
    next_states = []
    dones = []
    for sampled_exp in sampled_exps:
        states.append(sampled_exp[0])
        actions.append(sampled_exp[1])
        rewards.append(sampled_exp[2])
        next_states.append(sampled_exp[3])
        dones.append(sampled_exp[4])

    states = torch.cat(states, dim=0).float().to(device)
    actions = torch.cat(actions, dim=0).to(device)  # keep integer dtype for gather()
    rewards = torch.cat(rewards, dim=0).float().to(device)
    next_states = torch.cat(next_states, dim=0).float().to(device)
    dones = torch.cat(dones, dim=0).float().to(device)
    return states, actions, rewards, next_states, dones
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
Action 함수 정의
class TerranAgentWithRawActsAndRawObs(base_agent.BaseAgent):
    """Scripted Terran agent using raw actions/observations.

    Each entry in `actions` is the name of a method below; an action policy
    (random or learned) picks a name and the method translates it into one
    concrete pysc2 RAW_FUNCTIONS call per step.
    """

    # Action names added and defined hierarchically (build order dependencies
    # are enforced inside each method, not here).
    actions = ("do_nothing",
               "train_scv",
               "harvest_minerals",
               "harvest_gas",
               "build_commandcenter",
               "build_refinery",
               "build_supply_depot",
               "build_barracks",
               "train_marine",
               "build_factorys",
               "build_techlab_factorys",
               "train_tank",
               "build_armorys",
               "build_starports",
               "build_techlab_starports",
               "train_banshee",
               "attack",
               "attack_all",
               "tank_control"
               )

    def unit_type_is_selected(self, obs, unit_type):
        """Return True if the current single/multi selection is of `unit_type`."""
        if (len(obs.observation.single_select) > 0 and
                obs.observation.single_select[0].unit_type == unit_type):
            return True
        if (len(obs.observation.multi_select) > 0 and
                obs.observation.multi_select[0].unit_type == unit_type):
            return True
        return False

    def get_my_units_by_type(self, obs, unit_type):
        """Own units of `unit_type`; geysers are neutral, so skip the alliance filter for them."""
        if unit_type == units.Neutral.VespeneGeyser:  # gas only
            return [unit for unit in obs.observation.raw_units
                    if unit.unit_type == unit_type]
        return [unit for unit in obs.observation.raw_units
                if unit.unit_type == unit_type
                and unit.alliance == features.PlayerRelative.SELF]

    def get_enemy_units_by_type(self, obs, unit_type):
        """Enemy units of `unit_type`."""
        return [unit for unit in obs.observation.raw_units
                if unit.unit_type == unit_type
                and unit.alliance == features.PlayerRelative.ENEMY]

    def get_my_completed_units_by_type(self, obs, unit_type):
        """Own, fully-constructed units of `unit_type` (build_progress == 100)."""
        return [unit for unit in obs.observation.raw_units
                if unit.unit_type == unit_type
                and unit.build_progress == 100
                and unit.alliance == features.PlayerRelative.SELF]

    def get_enemy_completed_units_by_type(self, obs, unit_type):
        """Enemy, fully-constructed units of `unit_type`."""
        return [unit for unit in obs.observation.raw_units
                if unit.unit_type == unit_type
                and unit.build_progress == 100
                and unit.alliance == features.PlayerRelative.ENEMY]

    def get_distances(self, obs, units, xy):
        """Euclidean distances from each unit in `units` to point `xy`."""
        units_xy = [(unit.x, unit.y) for unit in units]
        return np.linalg.norm(np.array(units_xy) - np.array(xy), axis=1)

    def step(self, obs):
        """On the first frame, cache map-side info and per-episode flags."""
        super(TerranAgentWithRawActsAndRawObs, self).step(obs)
        if obs.first():
            command_center = self.get_my_units_by_type(
                obs, units.Terran.CommandCenter)[0]
            # Starting CC left of x=32 means we spawned in the top-left corner.
            self.base_top_left = (command_center.x < 32)
            # Hard-coded geyser coordinates for each spawn side.
            self.top_left_gas_xy = [(14, 25), (21,19), (46,23), (39,16)]
            self.bottom_right_gas_xy = [(44, 43), (37,50), (12,46), (19,53)]
            self.cloaking_flag = 1  # truthy until cloak research is (intended to be) issued
            # Vehicle-weapon upgrade bookkeeping (set True once requested).
            self.TerranVehicleWeaponsLevel1 = False
            self.TerranVehicleWeaponsLevel2 = False
            self.TerranVehicleWeaponsLevel3 = False

    def do_nothing(self, obs):
        """No-op action."""
        return actions.RAW_FUNCTIONS.no_op()

    def train_scv(self, obs):
        """Queue an SCV at an under-saturated CC (cap 35 SCVs, queue < 5)."""
        completed_commandcenterses = self.get_my_completed_units_by_type(
            obs, units.Terran.CommandCenter)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (len(completed_commandcenterses) > 0
                and obs.observation.player.minerals >= 100
                and len(scvs) < 35):
            commandcenters = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
            # Only CCs with fewer than 18 assigned workers.
            ccs = [commandcenter for commandcenter in commandcenters
                   if commandcenter.assigned_harvesters < 18]
            if ccs:
                ccs = ccs[0]
                if ccs.order_length < 5:
                    return actions.RAW_FUNCTIONS.Train_SCV_quick("now", ccs.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def harvest_minerals(self, obs):
        """Send an idle SCV to the nearest mineral patch (optimal worker allocation)."""
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter)
        # Pick a CC that still has room for more harvesters.
        cc = [commandcenter for commandcenter in commandcenters
              if commandcenter.assigned_harvesters < 18]
        if cc:
            cc = cc[0]
            idle_scvs = [scv for scv in scvs if scv.order_length == 0]
            if len(idle_scvs) > 0 and cc.assigned_harvesters < 18:
                # Every mineral-field variant that can appear on the map.
                mineral_patches = [unit for unit in obs.observation.raw_units
                                   if unit.unit_type in [
                                       units.Neutral.BattleStationMineralField,
                                       units.Neutral.BattleStationMineralField750,
                                       units.Neutral.LabMineralField,
                                       units.Neutral.LabMineralField750,
                                       units.Neutral.MineralField,
                                       units.Neutral.MineralField750,
                                       units.Neutral.PurifierMineralField,
                                       units.Neutral.PurifierMineralField750,
                                       units.Neutral.PurifierRichMineralField,
                                       units.Neutral.PurifierRichMineralField750,
                                       units.Neutral.RichMineralField,
                                       units.Neutral.RichMineralField750
                                   ]]
                scv = random.choice(idle_scvs)
                distances = self.get_distances(obs, mineral_patches, (scv.x, scv.y))
                mineral_patch = mineral_patches[np.argmin(distances)]
                return actions.RAW_FUNCTIONS.Harvest_Gather_unit(
                    "now", scv.tag, mineral_patch.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def harvest_gas(self, obs):
        """Send a random SCV to the closest under-saturated refinery (< 3 workers)."""
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        refs = self.get_my_units_by_type(obs, units.Terran.Refinery)
        refs = [refinery for refinery in refs if refinery.assigned_harvesters < 3]
        if refs:
            ref = refs[0]
            # NOTE(review): `ref.ideal_harvesters` is used as a truthiness
            # gate here, not compared against assigned_harvesters — confirm intent.
            if len(scvs) > 0 and ref.ideal_harvesters:
                scv = random.choice(scvs)
                distances = self.get_distances(obs, refs, (scv.x, scv.y))
                ref = refs[np.argmin(distances)]
                return actions.RAW_FUNCTIONS.Harvest_Gather_unit(
                    "now", scv.tag, ref.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def build_commandcenter(self,obs):
        """Rebuild the main CC if lost, otherwise expand to the fixed second-base spot."""
        commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if len(commandcenters) == 0 and obs.observation.player.minerals >= 400 and len(scvs) > 0:
            # Main command center was destroyed — rebuild at the start location.
            ccs_xy = (19, 23) if self.base_top_left else (39,45)
            distances = self.get_distances(obs, scvs, ccs_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
                "now", scv.tag, ccs_xy)
        if (len(commandcenters) < 2
                and obs.observation.player.minerals >= 400
                and len(scvs) > 0):
            ccs_xy = (41, 21) if self.base_top_left else (17, 48)
            if len(commandcenters) == 1 and (
                    (commandcenters[0].x,commandcenters[0].y) == (41,21)
                    or (commandcenters[0].x,commandcenters[0].y) == (17,48)):
                # Only the expansion survives, i.e. the main CC was destroyed —
                # rebuild at the start location instead.
                ccs_xy = (19, 23) if self.base_top_left else (39,45)
            distances = self.get_distances(obs, scvs, ccs_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
                "now", scv.tag, ccs_xy)
        return actions.RAW_FUNCTIONS.no_op()

    ################################################################################################
    ####################################### refinery ###############################################
    def build_refinery(self,obs):
        """Build a refinery on a randomly chosen geyser of our side of the map."""
        refinerys = self.get_my_units_by_type(obs,units.Terran.Refinery)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (obs.observation.player.minerals >= 100 and len(scvs) > 0):
            gas = self.get_my_units_by_type(obs, units.Neutral.VespeneGeyser)[0]
            if self.base_top_left:
                gases = self.top_left_gas_xy
            else:
                gases = self.bottom_right_gas_xy
            # Pick a random geyser slot; only act when the first raw geyser
            # happens to match it (acts as a stochastic throttle).
            rc = np.random.choice([0,1,2,3])
            gas_xy = gases[rc]
            if (gas.x, gas.y) == gas_xy:
                distances = self.get_distances(obs, scvs, gas_xy)
                scv = scvs[np.argmin(distances)]
                return actions.RAW_FUNCTIONS.Build_Refinery_pt(
                    "now", scv.tag, gas.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def build_supply_depot(self, obs):
        """Build a depot near a CC when supply headroom drops below 8."""
        supply_depots = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        free_supply = (obs.observation.player.food_cap -
                       obs.observation.player.food_used)
        if (obs.observation.player.minerals >= 100 and len(scvs) > 0
                and free_supply < 8):
            ccs = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
            if ccs:
                for cc in ccs:
                    cc_x, cc_y = cc.x, cc.y
                    # Random offset away from the CC, mirrored per spawn side.
                    rand1,rand2 = random.randint(0,10),random.randint(-10,0)
                    supply_depot_xy = (cc_x + rand1, cc_y + rand2) if self.base_top_left else (cc_x - rand1, cc_y - rand2)
                    # Abort when the candidate point falls off the 64x64 map.
                    if 0 < supply_depot_xy[0] < 64 and 0 < supply_depot_xy[1] < 64:
                        pass
                    else:
                        return actions.RAW_FUNCTIONS.no_op()
                    distances = self.get_distances(obs, scvs, supply_depot_xy)
                    scv = scvs[np.argmin(distances)]
                    return actions.RAW_FUNCTIONS.Build_SupplyDepot_pt(
                        "now", scv.tag, supply_depot_xy)
        return actions.RAW_FUNCTIONS.no_op()

    def build_barracks(self, obs):
        """Build up to 3 barracks near a depot; opportunistically expand to a second CC."""
        completed_supply_depots = self.get_my_completed_units_by_type(
            obs, units.Terran.SupplyDepot)
        barrackses = self.get_my_units_by_type(obs, units.Terran.Barracks)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (len(completed_supply_depots) > 0
                and obs.observation.player.minerals >= 150
                and len(scvs) > 0 and len(barrackses)< 3):
            # Anchor new barracks next to existing supply depots.
            brks = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)
            completed_command_center = self.get_my_completed_units_by_type(
                obs, units.Terran.CommandCenter)
            if len(barrackses) >= 1 and len(completed_command_center) == 1:
                # double commands: prioritize taking the expansion first.
                commandcenters = self.get_my_units_by_type(obs,units.Terran.CommandCenter)
                scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
                if (len(commandcenters) < 2
                        and obs.observation.player.minerals >= 400
                        and len(scvs) > 0):
                    ccs_xy = (41, 21) if self.base_top_left else (17, 48)
                    distances = self.get_distances(obs, scvs, ccs_xy)
                    scv = scvs[np.argmin(distances)]
                    return actions.RAW_FUNCTIONS.Build_CommandCenter_pt(
                        "now", scv.tag, ccs_xy)
            if brks:
                for brk in brks:
                    brk_x,brk_y = brk.x, brk.y
                    rand1, rand2 = random.randint(1,3),random.randint(1,3)
                    barracks_xy = (brk_x + rand1, brk_y + rand2) if self.base_top_left else (brk_x - rand1, brk_y - rand2)
                    if 0 < barracks_xy[0] < 64 and 0 < barracks_xy[1] < 64:
                        pass
                    else:
                        return actions.RAW_FUNCTIONS.no_op()
                    distances = self.get_distances(obs, scvs, barracks_xy)
                    scv = scvs[np.argmin(distances)]
                    return actions.RAW_FUNCTIONS.Build_Barracks_pt(
                        "now", scv.tag, barracks_xy)
        return actions.RAW_FUNCTIONS.no_op()

    def train_marine(self, obs):
        """Train marines until an armory completes; afterwards train Thors instead."""
        completed_barrackses = self.get_my_completed_units_by_type(
            obs, units.Terran.Barracks)
        completed_factorys = self.get_my_completed_units_by_type(
            obs, units.Terran.Factory)
        completed_armorys = self.get_my_completed_units_by_type(
            obs, units.Terran.Armory)
        marines = self.get_my_units_by_type(obs, units.Terran.Marine)
        free_supply = (obs.observation.player.food_cap -
                       obs.observation.player.food_used)
        if (len(completed_barrackses) > 0
                and obs.observation.player.minerals >= 100
                and free_supply > 0 and len(completed_armorys) == 0):
            barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)[0]
            if barracks.order_length < 5:
                return actions.RAW_FUNCTIONS.Train_Marine_quick("now", barracks.tag)
        elif free_supply > 0 and len(completed_factorys) > 0 and len(completed_armorys) > 0:
            # Armory done: switch production to Thors from the first factory.
            factory = completed_factorys[0]
            if factory.order_length < 5:
                return actions.RAW_FUNCTIONS.Train_Thor_quick("now", factory.tag)
        return actions.RAW_FUNCTIONS.no_op()

    ###############################################################################################
    ###################################### Factorys ###############################################
    ###############################################################################################
    def build_factorys(self, obs):
        """Build up to 3 factories; fill in missing refineries first when gas is short."""
        completed_barrackses = self.get_my_completed_units_by_type(
            obs, units.Terran.Barracks)
        factorys = self.get_my_units_by_type(obs, units.Terran.Factory)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        ref = self.get_my_completed_units_by_type(obs,units.Terran.Refinery)
        if (len(completed_barrackses) > 0
                and obs.observation.player.minerals >= 200
                and len(factorys) < 3 and len(scvs) > 0):
            if len(factorys) >= 1 and len(ref) < 4:
                # Gas is short — build another refinery first (same logic as
                # build_refinery, inlined here).
                refinerys = self.get_my_units_by_type(obs,units.Terran.Refinery)
                scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
                if (obs.observation.player.minerals >= 100 and len(scvs) > 0):
                    gas = self.get_my_units_by_type(obs, units.Neutral.VespeneGeyser)[0]
                    if self.base_top_left:
                        gases = self.top_left_gas_xy
                    else:
                        gases = self.bottom_right_gas_xy
                    rc = np.random.choice([0,1,2,3])
                    gas_xy = gases[rc]
                    if (gas.x, gas.y) == gas_xy:
                        distances = self.get_distances(obs, scvs, gas_xy)
                        scv = scvs[np.argmin(distances)]
                        return actions.RAW_FUNCTIONS.Build_Refinery_pt(
                            "now", scv.tag, gas.tag)
            if len(factorys) >= 1:
                # Cluster new factories around the first one.
                rand1 = random.randint(-5,5)
                fx, fy = factorys[0].x, factorys[0].y
                factorys_xy = (fx + rand1, fy + rand1) if self.base_top_left else (fx - rand1, fy - rand1)
            else:
                rand1, rand2 = random.randint(-2,2), random.randint(-2,2)
                # x, y: fixed first-factory anchor per spawn side.
                factorys_xy = (39 + rand1, 25 + rand2) if self.base_top_left else (17 - rand1, 40 - rand2)
            # Stay on the map and never block a reserved expansion spot.
            if 0 < factorys_xy[0] < 64 and 0 < factorys_xy[1] < 64 and factorys_xy != (17,48) and factorys_xy != (41,21):
                pass
            else:
                return actions.RAW_FUNCTIONS.no_op()
            distances = self.get_distances(obs, scvs, factorys_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_Factory_pt(
                "now", scv.tag, factorys_xy)
        return actions.RAW_FUNCTIONS.no_op()

    def build_techlab_factorys(self, obs):
        """Attach a tech lab to the first factory (returns on first candidate)."""
        completed_factorys = self.get_my_completed_units_by_type(
            obs, units.Terran.Factory)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (len(completed_factorys) > 0
                and obs.observation.player.minerals >= 200):
            ftrs = self.get_my_units_by_type(obs, units.Terran.Factory)
            if ftrs:
                for ftr in ftrs:
                    ftr_x,ftr_y = ftr.x, ftr.y
                    factorys_xy = (ftr_x,ftr_y)
                    if 0 < factorys_xy[0] < 64 and 0 < factorys_xy[1] < 64:
                        pass
                    else:
                        return actions.RAW_FUNCTIONS.no_op()
                    return actions.RAW_FUNCTIONS.Build_TechLab_Factory_pt(
                        "now", ftr.tag, factorys_xy)
        return actions.RAW_FUNCTIONS.no_op()

    def train_tank(self, obs):
        """Queue a siege tank at the first factory once a factory tech lab exists."""
        completed_factorytechlab = self.get_my_completed_units_by_type(
            obs, units.Terran.FactoryTechLab)
        free_supply = (obs.observation.player.food_cap -
                       obs.observation.player.food_used)
        if (len(completed_factorytechlab) > 0
                and obs.observation.player.minerals >= 200):
            factorys = self.get_my_units_by_type(obs, units.Terran.Factory)[0]
            if factorys.order_length < 5:
                return actions.RAW_FUNCTIONS.Train_SiegeTank_quick("now", factorys.tag)
        return actions.RAW_FUNCTIONS.no_op()

    ###############################################################################
    ############################ Build Armory ##################################
    def build_armorys(self, obs):
        """Build up to 2 armories, then research vehicle-weapon upgrades 1→3."""
        completed_factory = self.get_my_completed_units_by_type(
            obs, units.Terran.Factory)
        armorys = self.get_my_units_by_type(obs, units.Terran.Armory)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (len(completed_factory) > 0
                and obs.observation.player.minerals >= 200
                and len(armorys) < 2 and len(scvs) > 0):
            rand1, rand2 = random.randint(-2,2),random.randint(-2,2)
            armorys_xy = (36 + rand1, 20 + rand2) if self.base_top_left else (
                20 - rand1, 50 - rand2)
            if 0 < armorys_xy[0] < 64 and 0 < armorys_xy[1] < 64:
                pass
            else:
                return actions.RAW_FUNCTIONS.no_op()
            distances = self.get_distances(obs, scvs, armorys_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_Armory_pt(
                "now", scv.tag, armorys_xy)
        elif (len(completed_factory) > 0
                and obs.observation.player.minerals >= 200
                and 1 <= len(armorys) and len(scvs) > 0):
            # Armory upgrades: the flags are set optimistically when the
            # research command is issued (not when it completes).
            armory = armorys[0]
            armory_xy = (armory.x, armory.y)
            #cloak_field = self.get_my_units_by_type(obs, upgrades.Upgrades.CloakingField)[0]
            if self.TerranVehicleWeaponsLevel1 == False:
                self.TerranVehicleWeaponsLevel1 = True
                return actions.RAW_FUNCTIONS.Research_TerranVehicleWeapons_quick("now", armory.tag)
            elif self.TerranVehicleWeaponsLevel1 == True and self.TerranVehicleWeaponsLevel2 == False:
                self.TerranVehicleWeaponsLevel2 = True
                return actions.RAW_FUNCTIONS.Research_TerranVehicleWeaponsLevel2_quick("now", armory.tag)
            elif self.TerranVehicleWeaponsLevel1 == True and self.TerranVehicleWeaponsLevel2 == True and self.TerranVehicleWeaponsLevel3 == False:
                self.TerranVehicleWeaponsLevel3 = True
                return actions.RAW_FUNCTIONS.Research_TerranVehicleWeaponsLevel3_quick("now", armory.tag)
        return actions.RAW_FUNCTIONS.no_op()

    ############################################################################################
    #################################### StarPort ##############################################
    def build_starports(self, obs):
        """Build the first starport; afterwards add factories near it, else fall back to marines."""
        completed_factorys = self.get_my_completed_units_by_type(
            obs, units.Terran.Factory)
        starports = self.get_my_units_by_type(obs, units.Terran.Starport)
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        if (len(completed_factorys) > 0
                and obs.observation.player.minerals >= 200
                and len(starports) < 1 and len(scvs) > 0):
            # stp_x,stp_y = (22,22), (36,46)  # map-center-side coordinates relative to the minerals
            if len(starports) >= 1:
                # NOTE(review): dead branch — unreachable because the guard
                # above already requires len(starports) < 1.
                rand1 = random.randint(-5,5)
                sx, sy = starports[0].x, starports[0].y
                starport_xy = (sx + rand1, sy + rand1) if self.base_top_left else (sx - rand1, sy - rand1)
            else:
                rand1, rand2 = random.randint(-5,5),random.randint(-5,5)
                starport_xy = (22 + rand1, 22 + rand2) if self.base_top_left else (36 - rand1, 46 - rand2)
            if 0 < starport_xy[0] < 64 and 0 < starport_xy[1] < 64:
                pass
            else:
                return actions.RAW_FUNCTIONS.no_op()
            distances = self.get_distances(obs, scvs, starport_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_Starport_pt(
                "now", scv.tag, starport_xy)
        ####################### After the starport, add more factories #########################
        elif (len(starports) >= 1
                and obs.observation.player.minerals >= 200
                and len(completed_factorys) < 4 and len(scvs) > 0):
            if len(starports) >= 1:
                rand1 = random.randint(-5,5)
                sx, sy = starports[0].x, starports[0].y
                factory_xy = (sx + rand1, sy + rand1) if self.base_top_left else (sx - rand1, sy - rand1)
            else:
                rand1, rand2 = random.randint(-5,5),random.randint(-5,5)
                factory_xy = (22 + rand1, 22 + rand2) if self.base_top_left else (36 - rand1, 46 - rand2)
            if 0 < factory_xy[0] < 64 and 0 < factory_xy[1] < 64:
                pass
            else:
                return actions.RAW_FUNCTIONS.no_op()
            distances = self.get_distances(obs, scvs, factory_xy)
            scv = scvs[np.argmin(distances)]
            return actions.RAW_FUNCTIONS.Build_Factory_pt(
                "now", scv.tag, factory_xy)
        else:
            # Cannot build: keep marine production ticking instead.
            completed_barrackses = self.get_my_completed_units_by_type(
                obs, units.Terran.Barracks)
            marines = self.get_my_units_by_type(obs, units.Terran.Marine)
            free_supply = (obs.observation.player.food_cap -
                           obs.observation.player.food_used)
            if (len(completed_barrackses) > 0
                    and obs.observation.player.minerals >= 100
                    and free_supply > 0):
                barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)[0]
                if barracks.order_length < 5:
                    return actions.RAW_FUNCTIONS.Train_Marine_quick("now", barracks.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def build_techlab_starports(self, obs):
        """Attach a tech lab to a starport, then (attempt to) research cloak."""
        completed_starports = self.get_my_completed_units_by_type(
            obs, units.Terran.Starport)
        completed_starport_techlab = self.get_my_completed_units_by_type(
            obs, units.Terran.StarportTechLab)
        if (len(completed_starports) < 3
                and obs.observation.player.minerals >= 200):
            stps = self.get_my_units_by_type(obs, units.Terran.Starport)
            if stps:
                for stp in stps:
                    stp_x,stp_y = stp.x, stp.y
                    starport_xy = (stp_x,stp_y)
                    return actions.RAW_FUNCTIONS.Build_TechLab_Starport_pt(
                        "now", stp.tag, starport_xy)
            ############ Cloak upgrade #########################
            if len(completed_starport_techlab) > 0 and self.cloaking_flag:
                # self.cloaking_flag = 0
                cloaking = upgrades.Upgrades.CloakingField
                stp_techlab = self.get_my_units_by_type(obs, units.Terran.StarportTechLab)
                if stp_techlab:
                    stp_tech_xy = (stp_techlab[0].x, stp_techlab[0].y)
                    cloak_field = self.get_my_units_by_type(obs, upgrades.Upgrades.CloakingField)[0]
                    # NOTE(review): this passes an Upgrades enum where the
                    # FUNCTIONS research call expects a unit selection — likely
                    # never executes successfully; verify against pysc2 API.
                    return actions.FUNCTIONS.Research_BansheeCloakingField_quick("now", cloaking )
        return actions.RAW_FUNCTIONS.no_op()

    def train_banshee(self, obs):
        """Produce up to 3 Ravens (cloak detection) first, then Banshees."""
        completed_starporttechlab = self.get_my_completed_units_by_type(
            obs, units.Terran.StarportTechLab)
        ravens = self.get_my_units_by_type(obs, units.Terran.Raven)
        free_supply = (obs.observation.player.food_cap -
                       obs.observation.player.food_used)
        if (len(completed_starporttechlab) > 0
                and obs.observation.player.minerals >= 200
                and free_supply > 3):
            starports = self.get_my_units_by_type(obs, units.Terran.Starport)[0]
            ############################### Ravens for detecting cloaked units #######################
            if starports.order_length < 2 and len(ravens) < 3 :
                return actions.RAW_FUNCTIONS.Train_Raven_quick("now", starports.tag)
            #########################################################################################
            if starports.order_length < 5:
                return actions.RAW_FUNCTIONS.Train_Banshee_quick("now", starports.tag)
        return actions.RAW_FUNCTIONS.no_op()

    ############################################################################################
    def attack(self, obs):
        """With >20 marines, send the farthest marine toward a random enemy-side point;
        otherwise keep training marines."""
        marines = self.get_my_units_by_type(obs, units.Terran.Marine)
        if 20 < len(marines):
            flag = random.randint(0,2)
            if flag == 1:
                attack_xy = (38, 44) if self.base_top_left else (19, 23)
            else:
                attack_xy = (16, 45) if self.base_top_left else (42, 19)
            distances = self.get_distances(obs, marines, attack_xy)
            # NOTE(review): argmax picks the marine FARTHEST from the target — confirm intent.
            marine = marines[np.argmax(distances)]
            #marine = marines
            x_offset = random.randint(-5, 5)
            y_offset = random.randint(-5, 5)
            return actions.RAW_FUNCTIONS.Attack_pt(
                "now", marine.tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
        else:
            barracks = self.get_my_units_by_type(obs, units.Terran.Barracks)
            if len(barracks) > 0:
                barracks = barracks[0]
                if barracks.order_length < 5:
                    return actions.RAW_FUNCTIONS.Train_Marine_quick("now", barracks.tag)
        return actions.RAW_FUNCTIONS.no_op()

    def attack_all(self,obs):
        """Army-wide behaviour: extend with every new unit type. With a big army
        (>25) push the enemy main; otherwise regroup near the map center.
        A random flag occasionally sieges/unsieges tanks instead of moving."""
        marines = self.get_my_units_by_type(obs, units.Terran.Marine)
        tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTank)
        banshees = self.get_my_units_by_type(obs, units.Terran.Banshee)
        raven = self.get_my_units_by_type(obs, units.Terran.Raven)
        thor = self.get_my_units_by_type(obs, units.Terran.Thor)
        sieged_tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTankSieged)
        total_tanks = tanks + sieged_tanks
        all_units = marines + total_tanks + banshees + raven + thor
        if 25 < len(all_units):
            flag = random.randint(0,1000)
            if flag%4 == 0:
                attack_xy = (39, 45) if self.base_top_left else (19, 23)
            elif flag%4 == 1:
                attack_xy = (39, 45) if self.base_top_left else (19, 23)
                if len(tanks) > 0:
                    distances = self.get_distances(obs, tanks, attack_xy)
                    tank = tanks[np.argmax(distances)]
                    x_offset = random.randint(-1, 1)
                    y_offset = random.randint(-1, 1)
                    # NOTE(review): Morph_SiegeMode_quick is a "quick" ability —
                    # the point argument here is suspicious; verify signature.
                    return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(
                        "now", tank.tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
            elif flag%4 == 2:
                attack_xy = (39, 45) if self.base_top_left else (19, 23)
                #### leave siege mode ####
                if len(total_tanks) > 0:
                    all_tanks_tag = [tank.tag for tank in total_tanks]
                    return actions.RAW_FUNCTIONS.Morph_Unsiege_quick(
                        "now", all_tanks_tag)
            else:
                attack_xy = (17, 48) if self.base_top_left else (41, 21)
            x_offset = random.randint(-5, 5)
            y_offset = random.randint(-5, 5)
            all_tag = [unit.tag for unit in all_units]
            return actions.RAW_FUNCTIONS.Attack_pt(
                "now", all_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
        else:
            flag = random.randint(0,1000)
            if flag%4 == 0:
                attack_xy = (35, 25) if self.base_top_left else (25, 40)
            elif flag%4 == 1:
                attack_xy = (35, 25) if self.base_top_left else (25, 40)
                if len(tanks) > 0:
                    tanks_tag = [tank.tag for tank in tanks]
                    x_offset = random.randint(-1, 1)
                    y_offset = random.randint(-1, 1)
                    return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(
                        "now", tanks_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
            elif flag%4 == 2:
                attack_xy = (35, 25) if self.base_top_left else (25, 40)
            else:
                attack_xy = (30, 25) if self.base_top_left else (33, 40)
            x_offset = random.randint(-1, 1)
            y_offset = random.randint(-1, 1)
            # Tanks are excluded from the regroup move (they hold position).
            all_units = marines + banshees + raven + thor
            all_tag = [unit.tag for unit in all_units]
            if all_tag:
                return actions.RAW_FUNCTIONS.Attack_pt(
                    "now", all_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
        return actions.RAW_FUNCTIONS.no_op()

    ###################################################################################
    ############################### Unit Controls #####################################
    def tank_control(self, obs):
        """With < 8 tanks, siege up to 4 of them near a forward point;
        with 8 or more, unsiege everything."""
        tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTank)
        sieged_tanks = self.get_my_units_by_type(obs, units.Terran.SiegeTankSieged)
        total_tanks = tanks + sieged_tanks
        if len(total_tanks) < 8:
            if tanks:
                attack_xy = (40, 25) if self.base_top_left else (25, 40)
                distances = self.get_distances(obs, tanks, attack_xy)
                # NOTE(review): sorting `distances` does not reorder `tanks`,
                # so tanks[:4] is NOT the four closest tanks — confirm intent.
                distances.sort()
                tank_tag = [t.tag for t in tanks[:4]]
                x_offset = random.randint(-5, 5)
                y_offset = random.randint(-5, 5)
                return actions.RAW_FUNCTIONS.Morph_SiegeMode_quick(
                    "now", tank_tag, (attack_xy[0] + x_offset, attack_xy[1] + y_offset))
        else:
            #### leave siege mode ####
            all_tanks_tag = [tank.tag for tank in total_tanks]
            return actions.RAW_FUNCTIONS.Morph_Unsiege_quick(
                "now", all_tanks_tag)
        return actions.RAW_FUNCTIONS.no_op()


class TerranRandomAgent(TerranAgentWithRawActsAndRawObs):
    """Baseline agent: picks one of the scripted actions uniformly at random."""

    def step(self, obs):
        super(TerranRandomAgent, self).step(obs)
        action = random.choice(self.actions)
        return getattr(self, action)(obs)
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
Hyperparameter하이퍼파라미터는 심층강화학습 알고리즘에서 성능에 매우 큰 영향을 미칩니다.이 실험에 쓰인 하이퍼파라미터는 https://github.com/chucnorrisful/dqn 실험에서 제안된 값들을 참고하였습니다.- self.s_dim = 21- self.a_dim = 6- self.lr = 1e-4 * 1- self.batch_size = 32- self.gamma = 0.99- self.memory_size = 200000- self.eps_max = 1.0- self.eps_min = 0.01- self.epsilon = 1.0- self.init_sampling = 4000- self.target_update_interval = 10- self.epsilon = max(self.eps_min, self.eps_max - self.eps_min * (self.episode_count / 50))![Winning rate graph](./images/rlagent_with_vanilla_dqn_score-Terran-Terran-495_Eps.png)
class TerranRLAgentWithRawActsAndRawObs(TerranAgentWithRawActsAndRawObs):
    """Terran agent that selects macro actions with a vanilla DQN.

    State: a 21-dim hand-crafted feature vector (own/enemy unit counts,
    supply, affordability flags). Action: an index into ``self.actions``
    (19 scripted macro behaviours defined on the base class).
    Relies on module-level globals defined elsewhere in the notebook:
    ``device``, ``scores``, ``scores_window``, ``writer``, and the
    DQN/replay-memory helper classes.
    """

    def __init__(self):
        super(TerranRLAgentWithRawActsAndRawObs, self).__init__()

        # Network dimensions: 21 state features in, 19 macro actions out.
        self.s_dim = 21
        self.a_dim = 19

        # Hyperparameters (values follow https://github.com/chucnorrisful/dqn).
        self.lr = 1e-4 * 1
        self.batch_size = 32
        self.gamma = 0.99
        self.memory_size = 200000
        self.eps_max = 1.0
        self.eps_min = 0.01
        self.epsilon = 1.0
        self.init_sampling = 4000          # warm-up transitions before learning starts
        self.target_update_interval = 10   # episodes between target-net syncs

        # Checkpoint / score file stems (module-level constants).
        self.data_file_qnet = DATA_FILE_QNET
        self.data_file_qnet_target = DATA_FILE_QNET_TARGET
        self.score_file = SCORE_FILE

        # Online and target Q-networks: one hidden layer of 128 ReLU units.
        self.qnetwork = NaiveMultiLayerPerceptron(input_dim=self.s_dim,
                                                  output_dim=self.a_dim,
                                                  num_neurons=[128],
                                                  hidden_act_func='ReLU',
                                                  out_act_func='Identity').to(device)
        self.qnetwork_target = NaiveMultiLayerPerceptron(input_dim=self.s_dim,
                                                         output_dim=self.a_dim,
                                                         num_neurons=[128],
                                                         hidden_act_func='ReLU',
                                                         out_act_func='Identity').to(device)

        # NOTE(review, translated): loading a checkpointed qnet from an older
        # run may carry a different input/output dimensionality than the
        # current model — confirm checkpoints match s_dim/a_dim before reuse.
        if os.path.isfile(self.data_file_qnet + '.pt'):
            self.qnetwork.load_state_dict(torch.load(self.data_file_qnet + '.pt'))
        if os.path.isfile(self.data_file_qnet_target + '.pt'):
            self.qnetwork_target.load_state_dict(torch.load(self.data_file_qnet_target + '.pt'))

        # initialize target network same as the main network.
        self.qnetwork_target.load_state_dict(self.qnetwork.state_dict())

        self.dqn = DQN(state_dim=self.s_dim,
                       action_dim=self.a_dim,
                       qnet=self.qnetwork,
                       qnet_target=self.qnetwork_target,
                       lr=self.lr,
                       gamma=self.gamma,
                       epsilon=self.epsilon).to(device)

        self.memory = ExperienceReplayMemory(self.memory_size)

        self.print_every = 1
        self.cum_reward = 0
        self.cum_loss = 0
        self.episode_count = 0

        self.new_game()

    def reset(self):
        """Reset for a new episode: base-class reset plus per-game state."""
        super(TerranRLAgentWithRawActsAndRawObs, self).reset()
        self.new_game()

    def new_game(self):
        """Clear per-episode bookkeeping and re-schedule epsilon."""
        self.base_top_left = None
        self.previous_state = None
        self.previous_action = None
        self.cum_reward = 0
        self.cum_loss = 0
        # epsilon scheduling
        # slowly decaying_epsilon: linear decay reaching eps_min after ~4950 episodes
        self.epsilon = max(self.eps_min, self.eps_max - self.eps_min * (self.episode_count / 50))
        self.dqn.epsilon = torch.tensor(self.epsilon).to(device)

    def get_state(self, obs):
        """Build the 21-element feature tuple from the raw observation.

        Features: own command centers/SCVs/idle SCVs/depots/barracks/marines,
        queued marines, free supply, three affordability flags, then the
        mirrored enemy counts.
        """
        scvs = self.get_my_units_by_type(obs, units.Terran.SCV)
        idle_scvs = [scv for scv in scvs if scv.order_length == 0]
        command_centers = self.get_my_units_by_type(obs, units.Terran.CommandCenter)
        supply_depots = self.get_my_units_by_type(obs, units.Terran.SupplyDepot)
        completed_supply_depots = self.get_my_completed_units_by_type(
            obs, units.Terran.SupplyDepot)
        barrackses = self.get_my_units_by_type(obs, units.Terran.Barracks)
        completed_barrackses = self.get_my_completed_units_by_type(
            obs, units.Terran.Barracks)
        marines = self.get_my_units_by_type(obs, units.Terran.Marine)

        # Production queue length of the first finished barracks (0 if none).
        queued_marines = (completed_barrackses[0].order_length
                          if len(completed_barrackses) > 0 else 0)

        free_supply = (obs.observation.player.food_cap -
                       obs.observation.player.food_used)
        # Mineral thresholds: depot 100, barracks 150, marine 100 (marine cost
        # is 50; the 100 threshold is as originally written — TODO confirm intent).
        can_afford_supply_depot = obs.observation.player.minerals >= 100
        can_afford_barracks = obs.observation.player.minerals >= 150
        can_afford_marine = obs.observation.player.minerals >= 100

        enemy_scvs = self.get_enemy_units_by_type(obs, units.Terran.SCV)
        enemy_idle_scvs = [scv for scv in enemy_scvs if scv.order_length == 0]
        enemy_command_centers = self.get_enemy_units_by_type(
            obs, units.Terran.CommandCenter)
        enemy_supply_depots = self.get_enemy_units_by_type(
            obs, units.Terran.SupplyDepot)
        enemy_completed_supply_depots = self.get_enemy_completed_units_by_type(
            obs, units.Terran.SupplyDepot)
        enemy_barrackses = self.get_enemy_units_by_type(obs, units.Terran.Barracks)
        enemy_completed_barrackses = self.get_enemy_completed_units_by_type(
            obs, units.Terran.Barracks)
        enemy_marines = self.get_enemy_units_by_type(obs, units.Terran.Marine)

        return (len(command_centers),
                len(scvs),
                len(idle_scvs),
                len(supply_depots),
                len(completed_supply_depots),
                len(barrackses),
                len(completed_barrackses),
                len(marines),
                queued_marines,
                free_supply,
                can_afford_supply_depot,
                can_afford_barracks,
                can_afford_marine,
                len(enemy_command_centers),
                len(enemy_scvs),
                len(enemy_idle_scvs),
                len(enemy_supply_depots),
                len(enemy_completed_supply_depots),
                len(enemy_barrackses),
                len(enemy_completed_barrackses),
                len(enemy_marines))

    def step(self, obs):
        """One environment step: choose an action, store the transition,
        and — at episode end — train, sync the target net, checkpoint,
        and log the rolling win/tie/loss rates.
        """
        super(TerranRLAgentWithRawActsAndRawObs, self).step(obs)

        #time.sleep(0.5)

        state = self.get_state(obs)
        state = torch.tensor(state).float().view(1, self.s_dim).to(device)
        action_idx = self.dqn.choose_action(state)
        action = self.actions[action_idx]
        done = True if obs.last() else False
        # Store (s, a, r, s', done) once a previous action exists.
        if self.previous_action is not None:
            experience = (self.previous_state.to(device),
                          torch.tensor(self.previous_action).view(1, 1).to(device),
                          torch.tensor(obs.reward).view(1, 1).to(device),
                          state.to(device),
                          torch.tensor(done).view(1, 1).to(device))
            self.memory.push(experience)

        self.cum_reward += obs.reward
        self.previous_state = state
        self.previous_action = action_idx

        if obs.last():
            self.episode_count = self.episode_count + 1

            # Only learn once the replay buffer has enough warm-up samples.
            if len(self.memory) >= self.init_sampling:
                # training dqn
                sampled_exps = self.memory.sample(self.batch_size)
                sampled_exps = prepare_training_inputs(sampled_exps, device)
                self.dqn.learn(*sampled_exps)

            # Periodically copy online weights into the target network.
            if self.episode_count % self.target_update_interval == 0:
                self.dqn.qnet_target.load_state_dict(self.dqn.qnet.state_dict())

            if self.episode_count % self.print_every == 0:
                msg = (self.episode_count, self.cum_reward, self.epsilon)
                print("Episode : {:4.0f} | Cumulative Reward : {:4.0f} | Epsilon : {:.3f}".format(*msg))

            # Checkpoint both networks every episode.
            torch.save(self.dqn.qnet.state_dict(), self.data_file_qnet + '.pt')
            torch.save(self.dqn.qnet_target.state_dict(), self.data_file_qnet_target + '.pt')

            scores_window.append(obs.reward)  # save most recent reward
            win_rate = scores_window.count(1)/len(scores_window)*100
            tie_rate = scores_window.count(0)/len(scores_window)*100
            lost_rate = scores_window.count(-1)/len(scores_window)*100
            scores.append([win_rate, tie_rate, lost_rate])  # save most recent score(win_rate, tie_rate, lost_rate)
            with open(self.score_file + '.txt', "wb") as fp:
                pickle.dump(scores, fp)

            #writer.add_scalar("Loss/train", self.cum_loss/obs.observation.game_loop, self.episode_count)
            writer.add_scalar("Score", self.cum_reward, self.episode_count)

        # Dispatch to the scripted macro method named by the chosen action.
        return getattr(self, action)(obs)


if __name__ == "__main__":
    app.run(main)
I0922 23:23:02.756312 4616515008 sc_process.py:135] Launching SC2: /Applications/StarCraft II/Versions/Base81102/SC2.app/Contents/MacOS/SC2 -listen 127.0.0.1 -port 19112 -dataDir /Applications/StarCraft II/ -tempDir /var/folders/r1/x6k135_915z463fc7lc4hkp40000gn/T/sc-m9gntgxu/ -displayMode 0 -windowwidth 640 -windowheight 480 -windowx 50 -windowy 50 I0922 23:23:02.777318 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 0, running: True I0922 23:23:03.782161 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 1, running: True I0922 23:23:04.785537 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 2, running: True I0922 23:23:05.792152 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 3, running: True I0922 23:23:06.797986 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 4, running: True I0922 23:23:07.802450 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 5, running: True I0922 23:23:08.808930 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 6, running: True I0922 23:23:09.812354 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 7, running: True I0922 23:23:10.816801 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 8, running: True I0922 23:23:11.818795 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 9, running: True I0922 23:23:12.820305 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 10, running: True I0922 23:23:13.825726 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 11, running: True I0922 23:23:14.828522 4616515008 remote_controller.py:167] Connecting to: 
ws://127.0.0.1:19112/sc2api, attempt: 12, running: True I0922 23:23:15.832547 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 13, running: True I0922 23:23:16.834920 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 14, running: True I0922 23:23:17.841595 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 15, running: True I0922 23:23:18.845139 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 16, running: True I0922 23:23:19.846853 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 17, running: True I0922 23:23:20.848951 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 18, running: True I0922 23:23:21.852277 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 19, running: True I0922 23:23:22.856023 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 20, running: True I0922 23:23:23.861757 4616515008 remote_controller.py:167] Connecting to: ws://127.0.0.1:19112/sc2api, attempt: 21, running: True I0922 23:23:33.645176 4616515008 sc2_env.py:314] Environment is ready I0922 23:23:33.653440 4616515008 sc2_env.py:507] Starting episode 1: [terran, random] on Simple64 I0922 23:23:35.562937 4616515008 sc2_env.py:752] Environment Close I0922 23:25:39.113759 4616515008 sc2_env.py:725] Episode 1 finished after 15944 game steps. Outcome: [1], reward: [1], score: [11323]
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
[Winning rate graph]
!pip install matplotlib import pickle import numpy as np import matplotlib.pyplot as plt %matplotlib inline SCORE_FILE = 'rlagent_with_vanilla_dqn_score' with open(SCORE_FILE + '.txt', "rb") as fp: scores = pickle.load(fp) np_scores = np.array(scores) np_scores # plot the scores fig = plt.figure() ax = fig.add_subplot(111) plt.plot(np.arange(len(np_scores)), np_scores.T[0], color='r', label='win rate') plt.plot(np.arange(len(np_scores)), np_scores.T[1], color='g', label='tie rate') plt.plot(np.arange(len(np_scores)), np_scores.T[2], color='b', label='lose rate') plt.ylabel('Score %') plt.xlabel('Episode #') plt.legend(loc='best') plt.show() f = file.open()
_____no_output_____
MIT
s10336/STEP4-making-drl-pysc2-agent.ipynb
parksurk/skcc-drl-sc2-course-2020_1st
Copyright 2018 The TensorFlow Authors.
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Custom training: basics Run in Google Colab View source on GitHub In the previous tutorial we covered the TensorFlow APIs for automatic differentiation, a basic building block for machine learning.In this tutorial we will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.TensorFlow also includes a higher-level neural networks API (`tf.keras`) which provides useful abstractions to reduce boilerplate. We strongly recommend those higher level APIs for people working with neural networks. However, in this short tutorial we cover neural network training from first principles to establish a strong foundation. Setup
from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. %tensorflow_version 2.x except Exception: pass import tensorflow.compat.v1 as tf
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
VariablesTensors in TensorFlow are immutable stateless objects. Machine learning models, however, need to have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:
# Using python state x = tf.zeros([10, 10]) x += 2 # This is equivalent to x = x + 2, which does not mutate the original # value of x print(x)
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
TensorFlow, however, has stateful operations built in, and these are often more pleasant to use than low-level Python representations of your state. To represent weights in a model, for example, it's often convenient and efficient to use TensorFlow variables.A Variable is an object which stores a value and, when used in a TensorFlow computation, will implicitly read from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc) which manipulate the value stored in a TensorFlow variable.
v = tf.Variable(1.0) assert v.numpy() == 1.0 # Re-assign the value v.assign(3.0) assert v.numpy() == 3.0 # Use `v` in a TensorFlow operation like tf.square() and reassign v.assign(tf.square(v)) assert v.numpy() == 9.0
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Computations using Variables are automatically traced when computing gradients. For Variables representing embeddings TensorFlow will do sparse updates by default, which are more computation and memory efficient.Using Variables is also a way to quickly let a reader of your code know that this piece of state is mutable. Example: Fitting a linear modelLet's now put the few concepts we have so far ---`Tensor`, `GradientTape`, `Variable` --- to build and train a simple model. This typically involves a few steps:1. Define the model.2. Define a loss function.3. Obtain training data.4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.In this tutorial, we'll walk through a trivial example of a simple linear model: `f(x) = x * W + b`, which has two variables - `W` and `b`. Furthermore, we'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`. Define the modelLet's define a simple class to encapsulate the variables and the computation.
class Model(object): def __init__(self): # Initialize variable to (5.0, 0.0) # In practice, these should be initialized to random values. self.W = tf.Variable(5.0) self.b = tf.Variable(0.0) def __call__(self, x): return self.W * x + self.b model = Model() assert model(3.0).numpy() == 15.0
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Define a loss functionA loss function measures how well the output of a model for a given input matches the desired output. Let's use the standard L2 loss.
def loss(predicted_y, desired_y): return tf.reduce_mean(tf.square(predicted_y - desired_y))
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Obtain training dataLet's synthesize the training data with some noise.
TRUE_W = 3.0 TRUE_b = 2.0 NUM_EXAMPLES = 1000 inputs = tf.random_normal(shape=[NUM_EXAMPLES]) noise = tf.random_normal(shape=[NUM_EXAMPLES]) outputs = inputs * TRUE_W + TRUE_b + noise
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Before we train the model let's visualize where the model stands right now. We'll plot the model's predictions in red and the training data in blue.
import matplotlib.pyplot as plt plt.scatter(inputs, outputs, c='b') plt.scatter(inputs, model(inputs), c='r') plt.show() print('Current loss: '), print(loss(model(inputs), outputs).numpy())
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Define a training loopWe now have our network and our training data. Let's train it, i.e., use the training data to update the model's variables (`W` and `b`) so that the loss goes down using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent). There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer` implementations. We'd highly recommend using those implementations, but in the spirit of building from first principles, in this particular example we will implement the basic math ourselves.
def train(model, inputs, outputs, learning_rate): with tf.GradientTape() as t: current_loss = loss(model(inputs), outputs) dW, db = t.gradient(current_loss, [model.W, model.b]) model.W.assign_sub(learning_rate * dW) model.b.assign_sub(learning_rate * db)
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.
model = Model() # Collect the history of W-values and b-values to plot later Ws, bs = [], [] epochs = range(10) for epoch in epochs: Ws.append(model.W.numpy()) bs.append(model.b.numpy()) current_loss = loss(model(inputs), outputs) train(model, inputs, outputs, learning_rate=0.1) print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' % (epoch, Ws[-1], bs[-1], current_loss)) # Let's plot it all plt.plot(epochs, Ws, 'r', epochs, bs, 'b') plt.plot([TRUE_W] * len(epochs), 'r--', [TRUE_b] * len(epochs), 'b--') plt.legend(['W', 'b', 'true W', 'true_b']) plt.show()
_____no_output_____
Apache-2.0
site/en/r1/tutorials/eager/custom_training.ipynb
PhilipMay/docs
ENGR 1330 Exam 1 Sec 003/004 Fall 2020Take Home Portion of Exam 1 Full name R: HEX: ENGR 1330 Exam 1 Sec 003/004 Date: Question 1 (1 pts):Run the cell below, and leave the results in your notebook (Windows users may get an error, leave the error in place)
#### RUN! the Cell #### import sys ! hostname ! whoami print(sys.executable) # OK if generates an exception message on Windows machines
atomickitty.aws compthink /opt/conda/envs/python/bin/python
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
Question 2 (9 pts):- __When it is 8:00 in Lubbock,__ - __It is 9:00 in New York__ - __It is 14:00 in London__ - __It is 15:00 in Cairo__ - __It is 16:00 in Istanbul__ - __It is 19:00 in Hyderabad__ - __It is 22:00 in Tokyo__ __Write a function that reports the time in New York, London, Cairo, Istanbul, Hyderabad, and Tokyo based on the time in Lubbock. Use a 24-hour time format. Include error trapping that:__1- Issues a message like "Please Enter A Number from 00 to 23" if the first input is numeric but outside the range of [0,23].2- Takes any numeric input for "Lubbock time" selection , and forces it into an integer.3- Issues an appropriate message if the user's selection is non-numeric.__Check your function for these times:__- 8:00- 17:00- 0:00
def LBBtime(): try: LBK = int(input('What hour is it in Lubbock?- Please enter a number from 0 to 23')) if LBK>23: print('Please Enter A Number from 00 to 23') if LBK<23 and LBK>=0: if LBK+1>23: print("Time in New York is",(LBK+1)-24,":00") else: print("Time in New York is",(LBK+1),":00") if LBK+6>23: print("Time in London is",(LBK+6)-24,":00") else: print("Time in London is",(LBK+6),":00") if LBK+7>23: print("Time in Cairo is",(LBK+7)-24,":00") else: print("Time in Cairo is",(LBK+7),":00") if LBK+8>23: print("Time in Istanbul is",(LBK+8)-24,":00") else: print("Time in Istanbul is",(LBK+8),":00") if LBK+11>23: print("Time in Hyderabad is",(LBK+11)-24,":00") else: print("Time in Hyderabad is",(LBK+11),":00") if LBK+14>23: print("Time in Tokyo is",(LBK+14)-24,":00") else: print("Time in Tokyo is",(LBK+14),":00") return #null return except: print("Please Enter an Appropriate Input") LBBtime() LBBtime() LBBtime()
What hour is it in Lubbock?- Please enter a number from 0 to 23 0
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
Question 3 (28 pts): Follow the steps below. Add comments to your script and signify when each step and each task is done. *hint: For this problem you will need the numpy and pandas libraries.- __STEP1: There are 8 digits in your R. Define a 2x4 array with these 8 digits, name it "Rarray", and print it__- __STEP2: Find the maximum value of the "Rarray" and its position__- __STEP3: Sort the "Rarray" along the rows, store it in a new array named "Rarraysort", and print the new array out__- __STEP4: Define and print a 4x4 array that has the "Rarray" as its two first rows, and "Rarraysort" as its next rows. Name this new array "DoubleRarray"__- __STEP5: Slice and print a 4x3 array from the "DoubleRarray" that contains the last three columns of it. Name this new array "SliceRarray".__- __STEP6: Define the "SliceRarray" as a panda dataframe:__ - name it "Rdataframe", - name the rows as "Row A","Row B","Row C", and "Row D" - name the columns as "Column 1", "Column 2", and "Column 3"- __STEP7: Print the first few rows of the "Rdataframe".__- __STEP8: Create a new dataframe object ("R2dataframe") by adding a column to the "Rdataframe", name it "Column X" and fill it with "None" values. Then, use the appropriate descriptor function and print the data model (data column count, names, data types) of the "R2dataframe"__- __STEP9: Replace the **'None'** in the "R2dataframe" with 0. 
Then, print the summary statistics of each numeric column in the data frame.__- __STEP10: Define a function based on the equation below, apply on the entire "R2dataframe", store the results in a new dataframe ("R3dataframe"), and print the results and the summary statistics again.__ $$ y = x^2 - 5x +7 $$- __STEP11: Print the number of occurrences of each unique value in "Column 3"__- __STEP12: Sort the data frame with respect to "Column 1" with a descending order and print it__- __STEP13: Write the final format of the "R3dataframe" on a CSV file, named "Rfile.csv"__- __STEP14: Read the "Rfile.csv" and print its content.__** __Make sure to attach the "Rfile.csv" file to your midterm exam submission.__
# Code and Run your solution here:
print('#Step0: Install Dependencies')
import numpy as np
import pandas as pd

print('#Step1: Create the array')
Rarray = np.array([[1, 6, 7, 4], [5, 2, 3, 8]])  # the 8 digits of the R number as a 2x4 array
print(Rarray)

print('#Step2: find max and its position ')
print(np.max(Rarray))     # maximum value
print(np.argmax(Rarray))  # position (flattened index) of the maximum value

print('#Step3: Sort the array')
Rarraysort = np.sort(Rarray, axis=1)  # sort each row independently
print(Rarraysort)

print('#Step4: Create the double array')
# Stack Rarray on top of Rarraysort instead of re-typing the digits by
# hand, so the 4x4 array cannot drift out of sync with the arrays above.
DoubleRarray = np.vstack((Rarray, Rarraysort))
print(DoubleRarray)

print('#Step5: Slice the array')
SliceRarray = DoubleRarray[0:4, 1:4]  # all four rows, last three columns
print(SliceRarray)

print('#Step6: Make a dataframe')
myrowname = ["Row A", "Row B", "Row C", "Row D"]
mycolname = ["Column 1", "Column 2", "Column 3"]
Rdataframe = pd.DataFrame(SliceRarray, myrowname, mycolname)

print('#Step7: head method on dataframe')
print(Rdataframe.head())  # first few rows

print('#Step8: add column to a dataframe')
Rdataframe['Column X'] = None  # new column filled with None
R2dataframe = Rdataframe
print(R2dataframe.info())  # column count, names, dtypes

print('#Step9: Replace NA')
R2dataframe = R2dataframe.fillna(0)  # replace the None values with 0
print(R2dataframe.describe())        # summary statistics per numeric column

print('#Step10: Define a function, apply to a dataframe')
def myfunc(x):
    """Evaluate y = x**2 - 5*x + 7, the equation given in the problem.

    (The earlier draft computed x**2 - 10*x + 7, which did not match
    the stated equation.)
    """
    return (x ** 2) - (5 * x) + 7

R3dataframe = R2dataframe.apply(myfunc)  # applied column-wise; arithmetic is elementwise
print(R3dataframe)
print(R3dataframe.describe())

print('#Step11: Descriptors')
print(R3dataframe['Column 3'].value_counts())  # occurrences of each unique value

print('#Step12: Sort on values')
print(R3dataframe.sort_values('Column 1', ascending=False))

print('#Step13: Write to an external file')
R3dataframe.to_csv('Rfile.csv')

print('#Step14: Verify the write')
readfilecsv = pd.read_csv('Rfile.csv')  # read the file back
print(readfilecsv)                      # and print its contents
#Step0: Install Dependencies #Step1: Create the array [[1 6 7 4] [5 2 3 8]] #Step2: find max and its position 8 7 #Step3: Sort the array [[1 4 6 7] [2 3 5 8]] #Step4: Create the double array - manual entry [[1 6 7 4] [5 2 3 8] [1 4 6 7] [2 3 5 8]] #Step5: Slice the array [[6 7 4] [2 3 8] [4 6 7] [3 5 8]] #Step6: Make a dataframey #Step7: head method on dataframe Column 1 Column 2 Column 3 Row A 6 7 4 Row B 2 3 8 Row C 4 6 7 Row D 3 5 8 #Step8: add column to a dataframe <class 'pandas.core.frame.DataFrame'> Index: 4 entries, Row A to Row D Data columns (total 4 columns): # Column Non-Null Count Dtype --- ------ -------------- ----- 0 Column 1 4 non-null int64 1 Column 2 4 non-null int64 2 Column 3 4 non-null int64 3 Column X 0 non-null object dtypes: int64(3), object(1) memory usage: 160.0+ bytes None #Step9: Replace NA Column 1 Column 2 Column 3 Column X count 4.000000 4.000000 4.000000 4.0 mean 3.750000 5.250000 6.750000 0.0 std 1.707825 1.707825 1.892969 0.0 min 2.000000 3.000000 4.000000 0.0 25% 2.750000 4.500000 6.250000 0.0 50% 3.500000 5.500000 7.500000 0.0 75% 4.500000 6.250000 8.000000 0.0 max 6.000000 7.000000 8.000000 0.0 #Step10: Define a function, apply to a dataframe Column 1 Column 2 Column 3 Column X Row A -17 -14 -17 7 Row B -9 -14 -9 7 Row C -17 -17 -14 7 Row D -14 -18 -9 7 Column 1 Column 2 Column 3 Column X count 4.000000 4.000000 4.000000 4.0 mean -14.250000 -15.750000 -12.250000 7.0 std 3.774917 2.061553 3.947573 0.0 min -17.000000 -18.000000 -17.000000 7.0 25% -17.000000 -17.250000 -14.750000 7.0 50% -15.500000 -15.500000 -11.500000 7.0 75% -12.750000 -14.000000 -9.000000 7.0 max -9.000000 -14.000000 -9.000000 7.0 #Step11: Descriptors -9 2 -14 1 -17 1 Name: Column 3, dtype: int64 #Step12: Sort on values Column 1 Column 2 Column 3 Column X Row B -9 -14 -9 7 Row D -14 -18 -9 7 Row A -17 -14 -17 7 Row C -17 -17 -14 7 #Step13: Write to an external file #Step14: Verify the write Unnamed: 0 Column 1 Column 2 Column 3 Column X 0 Row A -17 -14 -17 7 1 
Row B -9 -14 -9 7 2 Row C -17 -17 -14 7 3 Row D -14 -18 -9 7
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
Problem 4 (32 pts)Graphing Functions Special Functions Consider the two functions listed below:\begin{equation}f(x) = e^{-\alpha x}\label{eqn:fofx}\end{equation}\begin{equation}g(x) = \gamma sin(\beta x)\label{eqn:gofx}\end{equation}Prepare a plot of the two functions on the same graph. Use the values in Table below for $\alpha$, $\beta$, and $\gamma$.|Parameter|Value||:---|---:||$\alpha$|0.50||$\beta$|3.00||$\gamma$|$\frac{\pi}{2}$|The plot should have $x$ values ranging from $0$ to $10$ (inclusive) in sufficiently small increments to see curvature in the two functions as well as to identify the number and approximate locations of intersections. In this problem, intersections are locations in the $x-y$ plane where the two "curves" cross one another of the two plots. By-hand evaluate f(x) for x=1, alpha = 1/2 (Simply enter your answer from a calculator)f(x) = 0.61 By-hand evaluate g(x) for x=3.14, beta = 1/2, gamma = 2 (Simply enter your answer from a calculator)g(x) = 1.99
# Define the first function f(x,alpha), test the function using your by hand answer # Define the first function f(x,alpha), test the function using your by hand answer def f(x,alpha): import math f = math.exp(-1.0*alpha*x) return f f(1,0.5) # Define the second function g(x,beta,gamma), test the function using your by hand answer def g(x,beta,gamma): import math f = gamma*math.sin(beta*x) return f g(3.14,0.5,2.0) # Built a list for x that ranges from 0 to 10, inclusive, with adjustable step sizes for plotting later on howMany = 100 scale = 10.0/howMany xvector = [] for i in range(0,howMany+1): xvector.append(scale*i) #xvector # activate to display # xvector # Build a plotting function that plots both functions on the same chart # Build a plotting function that plots both functions on the same chart import mplcursors alpha = 0.5 beta = 3.0 gamma = 1.57 yf = [] yg = [] for i in range(0,howMany+1): yf.append(f(xvector[i],alpha)) yg.append(g(xvector[i],beta,gamma)) def plot2lines(list11,list21,list12,list22,strx,stry,strtitle): # plot list1 on x, list2 on y, xlabel, ylabel, title from matplotlib import pyplot as plt # import the plotting library from matplotlibplt.show() plt.plot( list11, list21, color ='green', marker ='s', linestyle ='dashdot' , label = "Observed" ) # create a line chart, years on x-axis, gdp on y-axis plt.plot( list12, list22, color ='red', marker ='o', linestyle ='solid' , label = "Model") # create a line chart, years on x-axis, gdp on y-axis plt.legend() plt.title(strtitle)# add a title plt.ylabel(stry)# add a label to the x and y-axes plt.xlabel(strx) mplcursors.cursor() plt.show() # display the plot return #null return plot2lines(xvector,yf,xvector,yg,'x-value','y-value','plot of f and g') # Using the plot as a guide, find the approximate values of x where the two curves intercept (i.e. 
f(x) = g(x)) # You can either use interactive input, or direct specify x values, but need to show results # Using the plot as a guide, find the values of x where the two curves intercept (i.e. f(x) = g(x)) #xguess = float(input('my guess for x')) # ~0.7, and 6.25 alpha = 0.5 beta = 0.5 gamma = 2.0 xguess = 1 result = f(xguess,alpha) - g(xguess,beta,gamma) print('f(x) - g(x) =', result,' at x = ', xguess) xguess = 2 result = f(xguess,alpha) - g(xguess,beta,gamma) print('f(x) - g(x) =', result,' at x = ', xguess)
f(x) - g(x) = -0.3523204174957726 at x = 1 f(x) - g(x) = -1.3150625284443507 at x = 2
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
Bonus Problem 1. Extra Credit (You must complete the regular problems)!__create a class to compute the average grade (out of 10) of the students based on their grades in Quiz1, Quiz2, the Mid-term, Quiz3, and the Final exam.__| Student Name | Quiz 1 | Quiz 2 | Mid-term | Quiz 3 | Final Exam || ------------- | -----------| -----------| -------------| -----------| -------------|| Harry | 8 | 9 | 8 | 10 | 9 || Ron | 7 | 8 | 8 | 7 | 9 || Hermione | 10 | 10 | 9 | 10 | 10 || Draco | 8 | 7 | 9 | 8 | 9 || Luna | 9 | 8 | 7 | 6 | 5 |1. __Use docstrings to describe the purpose of the class.__2. __Create an object for each student and display the output as shown below.__"Student Name": **Average Grade** 3. __Create and print out a dictionary with the student names as keys and their average grades as data.__
# Code and run your solution here:
# Suggested Solution:
class Hogwarts:
    """Hold one student's five assessment grades and report their mean."""

    def __init__(self, Name, Quiz1, Quiz2, MidTerm, Quiz3, Final):
        self.Name = Name
        self.Quiz1 = Quiz1
        self.Quiz2 = Quiz2
        self.MidTerm = MidTerm
        self.Quiz3 = Quiz3
        self.Final = Final

    def average(self):
        # Mean of the five recorded grades.
        marks = (self.Quiz1, self.Quiz2, self.MidTerm, self.Quiz3, self.Final)
        return sum(marks) / 5

# One Hogwarts instance per student in the grade table.
_rows = [
    ('Harry', 8, 9, 8, 10, 9),
    ('Ron', 7, 8, 8, 7, 9),
    ('Hermione', 10, 10, 9, 10, 10),
    ('Draco', 8, 7, 9, 8, 9),
    ('Luna', 9, 8, 7, 6, 5),
]
S1, S2, S3, S4, S5 = (Hogwarts(*row) for row in _rows)

# "Student Name": average grade, one line per student.
for student in (S1, S2, S3, S4, S5):
    print(student.Name, student.average())

# Dictionary with student names as keys and average grades as values.
GradeDict = {s.Name: s.average() for s in (S1, S2, S3, S4, S5)}
print(GradeDict)
Harry 8.8 Ron 7.8 Hermione 9.8 Draco 8.2 Luna 7.0 {'Harry': 8.8, 'Ron': 7.8, 'Hermione': 9.8, 'Draco': 8.2, 'Luna': 7.0}
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
Bonus 2 Extra credit (You must complete the regular problems)! Write the VOLUME Function to compute the volume of Cylinders, Spheres, Cones, and Rectangular Boxes. This function should:- First, ask the user about __the shape of the object__ of interest using this statement:**"Please choose the shape of the object. Enter 1 for "Cylinder", 2 for "Sphere", 3 for "Cone", or 4 for "Rectangular Box""**- Second, based on user's choice in the previous step, __ask for the right inputs__.- Third, print out an statement with __the input values and the calculated volumes__. Include error trapping that:1. Issues a message that **"The object should be either a Cylinder, a Sphere, a Cone, or a Rectangular Box. Please Enter A Number from 1,2,3, and 4!"** if the first input is non-numeric.2. Takes any numeric input for the initial selection , and force it into an integer.4. Issues an appropriate message if the user's selection is numeric but outside the range of [1,4]3. Takes any numeric input for the shape characteristics , and force it into a float.4. Issues an appropriate message if the object characteristics are as non-numerics. Test the script for:1. __Sphere, r=10__2. __r=10 , Sphere__3. __Rectangular Box, w=5, h=10, l=0.5__- __Volume of a Cylinder = πr²h__- __Volume of a Sphere = 4(πr3)/3__- __Volume of a Cone = (πr²h)/3__- __Volume of a Rectangular Box = whl__
#Code and Run your solution here
#Suggested Solution:
import numpy as np  # NumPy: used here only for its pi constant

pi = np.pi  # pi value from the np package

def VOLUME():
    """Interactively compute the volume of a cylinder, sphere, cone, or box.

    Prompts for a shape choice (1-4) and then for that shape's dimensions.
    Any numeric selection is forced into an integer and dimensions into
    floats; non-numeric input produces an explanatory message instead of
    a traceback (only ValueError is caught, so real bugs still surface).
    """
    try:
        UI = input('Please choose the shape of the object. Enter 1 for "Cylinder", 2 for "Sphere", 3 for "Cone", or 4 for "Rectangular Box"')
        # int(float(...)) accepts any numeric string (e.g. "2.0"), as the
        # problem statement requires, then forces it into an integer.
        UI = int(float(UI))
        if UI == 1:
            try:
                UI2 = input('Please enter the radius of the Cylinder')
                r = float(UI2)
                UI3 = input('Please enter the height of the Cylinder')
                h = float(UI3)
                V = pi*h*r**2  # volume of a cylinder = pi * r^2 * h
                print("The volume of the Cylinder with the radius of ", r, " and the height of ", h, " is equal to", V)
            except ValueError:
                print("The radius and height of the Cylinder must be numerics. Please Try Again!")
        elif UI == 2:
            try:
                UI2 = input('Please enter the radius of the Sphere')
                r = float(UI2)
                V = (4*pi*r**3)/3  # volume of a sphere = 4*pi*r^3 / 3
                print("The volume of the Sphere with the radius of ", r, " is equal to", V)
            except ValueError:
                print("The radius of the Sphere must be numeric. Please Try Again!")
        elif UI == 3:
            try:
                UI2 = input('Please enter the radius of the Cone')
                r = float(UI2)
                UI3 = input('Please enter the height of the Cone')
                h = float(UI3)
                V = (pi*h*r**2)/3  # volume of a cone = pi * r^2 * h / 3
                print("The volume of the Cone with the radius of ", r, " and the height of ", h, " is equal to", V)
            except ValueError:
                print("The radius and height of the Cone must be numerics. Please Try Again!")
        elif UI == 4:
            try:
                UI2 = input('Please enter the width of the Rectangular Box')
                w = float(UI2)
                UI3 = input('Please enter the height of the Rectangular Box')
                h = float(UI3)
                UI4 = input('Please enter the length of the Rectangular Box')
                l = float(UI4)
                V = w*h*l  # volume of a rectangular box = w * h * l
                print("The volume of the Rectangular Box with the width of ", w, " and the height of ", h, " and the length of ", l, " is equal to", V)
            except ValueError:
                print("The width, height, and length of the Rectangular Box must be numerics. Please Try Again!")
        else:
            # Numeric selection but outside the supported range [1, 4].
            print("Please Enter A Number from 1,2,3, and 4!")
    except ValueError:
        # Non-numeric selection: int(float(UI)) raised ValueError.
        print("The object should be either a Cylinder, a Sphere, a Cone, or a Rectangular Box. Please Enter A Number from 1,2,3, and 4!")

VOLUME()
Please choose the shape of the object. Enter 1 for "Cylinder", 2 for "Sphere", 3 for "Cone", or 4 for "Rectangular Box" 1 Please enter the radius of the Cylinder 1 Please enter the height of the Cylinder 1
CC0-1.0
5-ExamProblems/.src/EX1-F2020-Solution/.ipynb_checkpoints/Exam1-Deploy-Solutions-checkpoint.ipynb
dustykat/engr-1330-psuedo-course
转置卷积:label:`sec_transposed_conv`到目前为止,我们所见到的卷积神经网络层,例如卷积层( :numref:`sec_conv_layer`)和汇聚层( :numref:`sec_pooling`),通常会减少下采样输入图像的空间维度(高和宽)。然而如果输入和输出图像的空间维度相同,在以像素级分类的语义分割中将会很方便。例如,输出像素所处的通道维可以保有输入像素在同一位置上的分类结果。为了实现这一点,尤其是在空间维度被卷积神经网络层缩小后,我们可以使用另一种类型的卷积神经网络层,它可以增加上采样中间层特征图的空间维度。在本节中,我们将介绍*转置卷积*(transposed convolution) :cite:`Dumoulin.Visin.2016`,用于逆转下采样导致的空间尺寸减小。
from mxnet import init, np, npx from mxnet.gluon import nn from d2l import mxnet as d2l npx.set_np()
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
基本操作让我们暂时忽略通道,从基本的转置卷积开始,设步幅为1且没有填充。假设我们有一个$n_h \times n_w$的输入张量和一个$k_h \times k_w$的卷积核。以步幅为1滑动卷积核窗口,每行$n_w$次,每列$n_h$次,共产生$n_h n_w$个中间结果。每个中间结果都是一个$(n_h + k_h - 1) \times (n_w + k_w - 1)$的张量,初始化为0。为了计算每个中间张量,输入张量中的每个元素都要乘以卷积核,从而使所得的$k_h \times k_w$张量替换中间张量的一部分。请注意,每个中间张量被替换部分的位置与输入张量中元素的位置相对应。最后,所有中间结果相加以获得最终结果。例如, :numref:`fig_trans_conv`解释了如何为$2\times 2$的输入张量计算卷积核为$2\times 2$的转置卷积。![卷积核为 $2\times 2$ 的转置卷积。阴影部分是中间张量的一部分,也是用于计算的输入和卷积核张量元素。 ](../img/trans_conv.svg):label:`fig_trans_conv`我们可以对输入矩阵`X`和卷积核矩阵`K`(**实现基本的转置卷积运算**)`trans_conv`。
def trans_conv(X, K):
    """Compute the basic 2-D transposed convolution of X with kernel K.

    Each input element X[i, j] scales the kernel, and the scaled kernel is
    accumulated into the output window anchored at (i, j), yielding an
    output of shape (n_h + k_h - 1, n_w + k_w - 1).
    """
    n_h, n_w = X.shape
    k_h, k_w = K.shape
    Y = np.zeros((n_h + k_h - 1, n_w + k_w - 1))
    for i in range(n_h):
        for j in range(n_w):
            # Broadcast X[i, j] over the kernel and add it in place.
            Y[i:i + k_h, j:j + k_w] += X[i, j] * K
    return Y
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
与通过卷积核“减少”输入元素的常规卷积(在 :numref:`sec_conv_layer`中)相比,转置卷积通过卷积核“广播”输入元素,从而产生大于输入的输出。我们可以通过 :numref:`fig_trans_conv`来构建输入张量`X`和卷积核张量`K`从而[**验证上述实现输出**]。此实现是基本的二维转置卷积运算。
# Verify trans_conv on the worked example: input and kernel are both
# [[0, 1], [2, 3]]; tracing trans_conv gives [[0, 0, 1], [0, 4, 6], [4, 12, 9]].
X = np.array([[0.0, 1.0], [2.0, 3.0]])
K = np.array([[0.0, 1.0], [2.0, 3.0]])
trans_conv(X, K)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
或者,当输入`X`和卷积核`K`都是四维张量时,我们可以[**使用高级API获得相同的结果**]。
# Same computation through the high-level API: reshape X and K to 4-D
# (batch, channel, height, width) and fix the layer's kernel to K via
# init.Constant, so tconv(X) reproduces trans_conv's result.
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.Conv2DTranspose(1, kernel_size=2)
tconv.initialize(init.Constant(K))
tconv(X)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
[**填充、步幅和多通道**]与常规卷积不同,在转置卷积中,填充被应用于的输出(常规卷积将填充应用于输入)。例如,当将高和宽两侧的填充数指定为1时,转置卷积的输出中将删除第一和最后的行与列。
# In a transposed convolution, padding=1 trims the OUTPUT rather than
# padding the input: the first and last rows/columns are removed.
tconv = nn.Conv2DTranspose(1, kernel_size=2, padding=1)
tconv.initialize(init.Constant(K))
tconv(X)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
在转置卷积中,步幅被指定为中间结果(输出),而不是输入。使用 :numref:`fig_trans_conv`中相同输入和卷积核张量,将步幅从1更改为2会增加中间张量的高和宽,因此输出张量在 :numref:`fig_trans_conv_stride2`中。![卷积核为$2\times 2$,步幅为2的转置卷积。阴影部分是中间张量的一部分,也是用于计算的输入和卷积核张量元素。](../img/trans_conv_stride2.svg):label:`fig_trans_conv_stride2`以下代码可以验证 :numref:`fig_trans_conv_stride2`中步幅为2的转置卷积的输出。
# strides=2 spaces the broadcast kernel copies two cells apart in the
# output, enlarging the result (see fig_trans_conv_stride2).
tconv = nn.Conv2DTranspose(1, kernel_size=2, strides=2)
tconv.initialize(init.Constant(K))
tconv(X)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
对于多个输入和输出通道,转置卷积与常规卷积以相同方式运作。假设输入有$c_i$个通道,且转置卷积为每个输入通道分配了一个$k_h\times k_w$的卷积核张量。当指定多个输出通道时,每个输出通道将有一个$c_i\times k_h\times k_w$的卷积核。同样,如果我们将$\mathsf{X}$代入卷积层$f$来输出$\mathsf{Y}=f(\mathsf{X})$,并创建一个与$f$具有相同的超参数、但输出通道数量是$\mathsf{X}$中通道数的转置卷积层$g$,那么$g(Y)$的形状将与$\mathsf{X}$相同。下面的示例可以解释这一点。
# A transposed-conv layer built with the same hyperparameters as conv
# (channel counts swapped) restores the spatial shape: tconv(conv(X))
# has the same shape as X.
X = np.random.uniform(size=(1, 10, 16, 16))
conv = nn.Conv2D(20, kernel_size=5, padding=2, strides=3)
tconv = nn.Conv2DTranspose(10, kernel_size=5, padding=2, strides=3)
conv.initialize()
tconv.initialize()
tconv(conv(X)).shape == X.shape
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
[**与矩阵变换的联系**]:label:`subsec-connection-to-mat-transposition`转置卷积为何以矩阵变换命名呢?让我们首先看看如何使用矩阵乘法来实现卷积。在下面的示例中,我们定义了一个$3\times 3$的输入`X`和$2\times 2$卷积核`K`,然后使用`corr2d`函数计算卷积输出`Y`。
# Reference convolution: 3x3 input X with 2x2 kernel K via d2l.corr2d,
# producing the 2x2 output Y used in the matrix-multiplication comparison.
X = np.arange(9.0).reshape(3, 3)
K = np.array([[1.0, 2.0], [3.0, 4.0]])
Y = d2l.corr2d(X, K)
Y
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
接下来,我们将卷积核`K`重写为包含大量0的稀疏权重矩阵`W`。权重矩阵的形状是($4$,$9$),其中非0元素来自卷积核`K`。
def kernel2matrix(K):
    """Rewrite the 2x2 kernel K as a sparse (4, 9) weight matrix W.

    Multiplying W by a flattened 3x3 input reproduces the 2x2 convolution
    output; each row holds the 5-element kernel strip shifted to the
    position of one output element.
    """
    strip = np.zeros(5)
    strip[:2] = K[0, :]
    strip[3:5] = K[1, :]
    W = np.zeros((4, 9))
    # Row r of W places the strip at the flattened offset of output r.
    for row, offset in enumerate((0, 1, 3, 4)):
        W[row, offset:offset + 5] = strip
    return W

W = kernel2matrix(K)
W
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
逐行连结输入`X`,获得了一个长度为9的矢量。然后,`W`的矩阵乘法和向量化的`X`给出了一个长度为4的向量。重塑它之后,可以获得与上面的原始卷积操作所得相同的结果`Y`:我们刚刚使用矩阵乘法实现了卷积。
# Convolution as matrix multiplication: W @ flatten(X), reshaped to 2x2,
# equals the convolution output Y computed earlier.
Y == np.dot(W, X.reshape(-1)).reshape(2, 2)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
同样,我们可以使用矩阵乘法来实现转置卷积。在下面的示例中,我们将上面的常规卷积$2 \times 2$的输出`Y`作为转置卷积的输入。想要通过矩阵相乘来实现它,我们只需要将权重矩阵`W`的形状转置为$(9, 4)$。
# Transposed convolution as matrix multiplication with the transposed
# weight matrix: W.T @ flatten(Y), reshaped to 3x3, equals trans_conv(Y, K).
Z = trans_conv(Y, K)
Z == np.dot(W.T, Y.reshape(-1)).reshape(3, 3)
_____no_output_____
MIT
submodules/resource/d2l-zh/mxnet/chapter_computer-vision/transposed-conv.ipynb
alphajayGithub/ai.online
[作業重點]清楚了解 L1, L2 的意義與差異為何,並了解 LASSO 與 Ridge 之間的差異與使用情境 作業 請閱讀相關文獻,並回答下列問題[脊回歸 (Ridge Regression)](https://blog.csdn.net/daunxx/article/details/51578787)[Linear, Ridge, Lasso Regression 本質區別](https://www.zhihu.com/question/38121173) Q1: LASSO 回歸可以被用來作為 Feature selection 的工具,請了解 LASSO 模型為什麼可用來作 Feature selection A1: LASSO 基於 L1 正則,隨著 alpha 的提升數值為0的參數數量隨之提升,而參數為0的特徵可以視為對模型沒有影響力,理當剔除,因此 LASSO 具有 Feature Selection 的功能。 Q2: 當自變數 (X) 存在高度共線性時,Ridge Regression 可以處理這樣的問題嗎? A2: 可以。當自變數存在高度共線性時,模型對 noise 的敏感度會提高,Ridge Regression 基於 L2 正則,可以減緩參數的劇烈變動,降低 noise 的影響性。
_____no_output_____
MIT
Day_039_HW.ipynb
semishen/ML100Days
PRESENTACIÓN **nombre:** Gabriela Ivonne Montoya Ortiz - **Profesión**: **Estudiante** - **Edad**: 19 años - **Pasa tiempos**: bailar diferentes estilos, leer, ver peliculas. - **Educación**: colegio salesiano anahuac revolucion Foto: Hola Ecuaciones... $f(x) = \sin(x)$
# Plot sin(x) over two full periods, [-2*pi, 2*pi], sampled at 100 points.
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
x = np.linspace(-2*np.pi, 2*np.pi, 100)
plt.figure(figsize = (4,3))
plt.plot(x, np.sin(x));
_____no_output_____
MIT
Untitled.ipynb
Gabs0102/Mi-primer-Proyecto
$$\frac{df}{dx} = -\nabla\psi$$ **Minichatbot, Gaby**
q1 = "¿Cómo te llamas?" q2 = "¿Qué edad tienes?" q3 = "¿Donde vienes?" q4 = "¿Sexo?" qs = [q1, q2, q3, q4] qs ans1 = "Mucho gusto, me llamo Gaby. " ans2 = "Legal!" ans3 = "Orale, muy bien." ans4 = "NO." anss = [ans1, ans2, ans3, ans4] anss def chatGaby(): print("Hola, bienvenite!, tengo algunas preguntas para ti.") print(qs[0]) nombre = input(">>> ") print(anss[0] + "Me gusta tu nombre, %s" % nombre) print("segunda pregunta") print(qs[1]) edad = input(">>> ") print("ohh, ya vas a cumplir %s" % (int(edad+1))) print(anss[1]) print("Je, y " + qs[2]) print(anss[2]) print(qs[3]) print(anss[3]) chatGaby() %run welcome.py
_____no_output_____
MIT
Untitled.ipynb
Gabs0102/Mi-primer-Proyecto
**LSTM - Time Series Prediction** **Importing libraries**
import pandas import matplotlib.pyplot as plt import numpy import math from tqdm import tqdm from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense from tensorflow.keras.layers import LSTM from sklearn.preprocessing import MinMaxScaler from sklearn.metrics import mean_squared_error # fix random seed for reproducibility numpy.random.seed(7)
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**Load the data**
# Download the monthly airline-passengers series and plot the raw values.
! rm /content/airline-passengers.csv
! wget https://raw.githubusercontent.com/jbrownlee/Datasets/master/airline-passengers.csv
dataset = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python')
plt.plot(dataset)
plt.show()
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    # Builds supervised pairs: X = a window of `look_back` consecutive
    # values, Y = the value immediately after the window.
    # NOTE(review): `-1` in the range drops one usable trailing sample;
    # presumably intentional (matches the source tutorial) — confirm.
    dataX, dataY = [], []
    for i in range(len(dataset)-look_back-1):
        a = dataset[i:(i+look_back), 0]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY)
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials
**LSTM Network for Regression**
# load the dataset dataframe = pandas.read_csv('airline-passengers.csv', usecols=[1], engine='python') dataset = dataframe.values dataset = dataset.astype('float32') # normalize the dataset scaler = MinMaxScaler(feature_range=(0, 1)) dataset = scaler.fit_transform(dataset) # split into train and test sets train_size = int(len(dataset) * 0.67) test_size = len(dataset) - train_size train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] print(len(train), len(test)) # reshape into X=t and Y=t+1 look_back = 1 trainX, trainY = create_dataset(train, look_back) testX, testY = create_dataset(test, look_back) # reshape input to be [samples, time steps, features] trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1])) # create and fit the LSTM network model = Sequential() model.add(LSTM(4, input_shape=(1, look_back))) model.add(Dense(1)) model.compile(loss='mean_squared_error', optimizer='adam') model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=0) # make predictions trainPredict = model.predict(trainX) testPredict = model.predict(testX) # invert predictions trainPredict = scaler.inverse_transform(trainPredict) trainY = scaler.inverse_transform([trainY]) testPredict = scaler.inverse_transform(testPredict) testY = scaler.inverse_transform([testY]) # calculate root mean squared error trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0])) print('Train Score: %.2f RMSE' % (trainScore)) testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0])) print('Test Score: %.2f RMSE' % (testScore)) # shift train predictions for plotting trainPredictPlot = numpy.empty_like(dataset) trainPredictPlot[:, :] = numpy.nan trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict # shift test predictions for plotting testPredictPlot = numpy.empty_like(dataset) testPredictPlot[:, :] = numpy.nan 
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict # plot baseline and predictions plt.plot(scaler.inverse_transform(dataset)) plt.plot(trainPredictPlot) plt.plot(testPredictPlot) plt.show()
_____no_output_____
MIT
time-series-prediction.ipynb
srivarshan-s/LSTM-Trials