markdown
stringlengths 0
37k
| code
stringlengths 1
33.3k
| path
stringlengths 8
215
| repo_name
stringlengths 6
77
| license
stringclasses 15
values |
|---|---|---|---|---|
Strings can be written in different ways using escape characters and other symbols.
|
# A single quote inside a string: use double-quote delimiters, or escape it with \'.
print("That is Alice's cat.")
print('Say hi to Bob\'s mother.')
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Many escape characters are available:
|
# Each print emits the single character produced by the escape sequence.
print('\'') # Single quote
print('\"') # Double quote
print('\t') # Tab
print('\n') # New line
print('\\') # Back slash
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Use r'' to print raw strings, where backslash escape sequences are treated as literal characters.
|
# The r prefix disables escape processing, so the backslash is printed literally.
print(r'That is Carol\'s cat.')
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Multi-line strings (delimited with ''' or """) are often easier.
|
# Triple-quoted strings may span multiple lines; embedded newlines are kept verbatim.
# (Spelling fixed: "burglary", "Sincerely".)
print("""Dear Alice,
Eve's cat has been arrested for catnapping, cat burglary, and extortion.
Sincerely, Bob.""")
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Large strings are treated in this way.
|
# Download a large text file over HTTP and display it (rj.text is a plain str).
import requests # get the Requests library
link = "https://automatetheboringstuff.com/files/rj.txt" # store the link where the file is at
rj = requests.get(link) # use Requests to get the link
print(rj.text) # Print out the text from Requests, and check that its stored
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Functions can now be run on it:
|
# len() works on the downloaded text like on any other string.
print(len(rj.text))
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
Strings can be treated like lists where each character is an element in that list.
|
hello = 'Hello World!'
# Indexing, slicing, and membership tests all work on strings.
demo_values = (
    hello[0],          # first character
    hello[1:5],        # slice of characters 1 through 4
    hello[-1],         # last character
    'Hello' in hello,  # substring membership
    'x' in hello,
    'HELLO' in hello,  # membership is case-sensitive
)
for value in demo_values:
    print(value)
|
AutomateTheBoringStuffWithPython/lesson19.ipynb
|
Vvkmnn/books
|
gpl-3.0
|
confirm results from grouped rating approach (sample)
|
# Restrict reviews to one bar, then aggregate the mean rating and review count
# per month (first_of_month is the monthly bucket), newest month first.
df_top_hops = df_ta_bar_reviews[df_ta_bar_reviews.bar=="Top_Hops"]
df_top_hops.head(15)
df_top_hops_grouped = pd.concat([df_top_hops[['first_of_month', 'rating']].groupby(['first_of_month']).mean(), \
df_top_hops[['first_of_month', 'rating']].groupby(['first_of_month']).count()], \
axis=1)
df_top_hops_grouped.columns = ['rating_mean', 'rating_count']
df_top_hops_grouped.sort_index(ascending=False, inplace=True)
df_top_hops_grouped.head()
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
confirmed
|
# Single weighted rating: weight each month's ratings by month_weight of its age
# in months (np.timedelta64(1, 'M') converts the timedelta to fractional months).
m_weight = pd.Series( ( df_top_hops_grouped.index.max() - df_top_hops_grouped.index ) / ( np.timedelta64(1, 'M') ) ).apply(month_weight)
m_rating = df_top_hops_grouped.rating_mean.values * df_top_hops_grouped.rating_count.values
m_denom = np.sum( m_weight * df_top_hops_grouped.rating_count.values )
w_rating = np.sum( m_weight * m_rating ) / m_denom
w_rating
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
cycle through dates/index for moving weighted rating
(ignore missing months for time being)
|
# Moving version: recompute the weighted rating at each month using only that
# month and older ones. Rows are sorted newest-first, so iloc[row:] is the history.
moving_weighted_ratings = []
for row in range(df_top_hops_grouped.shape[0]):
    m_weight = pd.Series((df_top_hops_grouped.iloc[row:].index.max() - df_top_hops_grouped.iloc[row:].index) / ( np.timedelta64(1, 'M') )).apply(month_weight)
    m_rating = df_top_hops_grouped.iloc[row:]['rating_mean'].values * df_top_hops_grouped.iloc[row:]['rating_count'].values
    m_denom = np.sum( m_weight * df_top_hops_grouped.iloc[row:]['rating_count'].values )
    w_rating = np.round( (np.sum( m_weight * m_rating ) / m_denom), 3 )
    moving_weighted_ratings.append( w_rating )
moving_weighted_ratings[:10]
df_top_hops_grouped['moving_weighted_ratings'] = moving_weighted_ratings
df_top_hops_grouped.head(10)
df_top_hops_grouped.moving_weighted_ratings.plot()
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
fill in missing months
|
# Rebuild the grouped frame from scratch (same aggregation as the earlier cell).
df_top_hops_grouped = pd.concat([df_top_hops[['first_of_month', 'rating']].groupby(['first_of_month']).mean(), \
df_top_hops[['first_of_month', 'rating']].groupby(['first_of_month']).count()], \
axis=1)
df_top_hops_grouped.columns = ['rating_mean', 'rating_count']
def group_ratings_by_date(df, date_field, rating_field):
    """Group df by date_field and return one row per date with two columns:
    'rating_mean' (average rating) and 'rating_count' (number of ratings)."""
    # A single groupby with two aggregations replaces the duplicated groupby calls.
    df_grouped = df[[date_field, rating_field]].groupby([date_field]).agg(['mean', 'count'])
    df_grouped.columns = ['rating_mean', 'rating_count']
    return df_grouped
# Same grouping as above, now via the reusable helper.
df_top_hops_grouped = group_ratings_by_date(df_top_hops, 'first_of_month', 'rating')
df_top_hops_grouped.head()
def expand_dates(df, index=True, date_field=False):
    """Return a Series of first-of-month dates covering every month in df's
    date range, including months that have no data.

    If index is True the dates are read from df.index, otherwise from the
    df[date_field] column. The range is extended one month past the maximum so
    that pd.date_range(freq="M") (month-end stamps) still covers the final
    month before first_of_month snaps each stamp back to the 1st.
    """
    dates = df.index if index else df[date_field]
    # pd.DateOffset rolls the year over correctly; the previous
    # .replace(month=max.month + 1) raised ValueError whenever the maximum
    # date fell in December (there is no month 13).
    end = dates.max() + pd.DateOffset(months=1)
    return pd.Series(pd.date_range(dates.min(), end, freq="M")).apply(first_of_month)
# Expand onto the full month range; months with no reviews get mean 0 / count 0.
expanded_date_series = expand_dates(df_top_hops_grouped)
df_exp_dates = pd.DataFrame({'bar': len(expanded_date_series) * ['Top_Hops']}, index=expanded_date_series)
df_exp_dates.sort_index(ascending=False, inplace=True)
df_exp_dates.head()
df_top_hops_grouped = pd.concat([df_exp_dates, df_top_hops_grouped], axis=1).fillna(0)
df_top_hops_grouped.sort_index(ascending=False, inplace=True)
df_top_hops_grouped.head(10)
def moving_weighted_ratings(df):
    """For each row of df (sorted newest-first) compute the weighted rating over
    that row and all older rows, weighting each month's ratings by month_weight
    of its age in months. Returns one rounded value per row."""
    ratings = []
    for start in range(df.shape[0]):
        history = df.iloc[start:]
        age_in_months = (history.index.max() - history.index) / np.timedelta64(1, 'M')
        weights = pd.Series(age_in_months).apply(month_weight)
        rating_points = history['rating_mean'].values * history['rating_count'].values
        denominator = np.sum(weights * history['rating_count'].values)
        ratings.append(np.round(np.sum(weights * rating_points) / denominator, 3))
    return ratings
# Sanity-check the helper against the earlier inline loop, then store and plot.
moving_weighted_ratings(df_top_hops_grouped)[:10]
df_top_hops_grouped['moving_weighted_ratings'] = moving_weighted_ratings(df_top_hops_grouped)
df_top_hops_grouped.head(10)
df_top_hops_grouped.moving_weighted_ratings.plot()
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
let's run for all, merge and...
|
# Build a placeholder frame spanning every month in the whole dataset, then
# walk the full pipeline manually for a single bar ('230_Fifth').
all_date_series = expand_dates(df_ta_bar_reviews, False, 'first_of_month')
df_all_dates = pd.DataFrame({'p_holder': np.zeros(len(all_date_series))}, index=all_date_series)
df_all_dates.sort_index(ascending=False, inplace=True)
df_all_dates.head()
df_bar_grouped = group_ratings_by_date(df_ta_bar_reviews[df_ta_bar_reviews.bar=='230_Fifth'], 'first_of_month', 'rating')
expanded_date_series = expand_dates(df_bar_grouped)
df_exp_dates = pd.DataFrame({'bar': len(expanded_date_series) * ['230_Fifth']}, index=expanded_date_series)
df_bar_grouped = pd.concat([df_exp_dates, df_bar_grouped], axis=1).fillna(0)
df_bar_grouped.sort_index(ascending=False, inplace=True)
df_bar_grouped['230_Fifth'] = moving_weighted_ratings(df_bar_grouped)
df_bar_grouped.head(10)
def bar_moving_weighted_ratings(df, bar_field, bar_name, date_field, rating_field):
    """Full pipeline for one bar: group ratings by month, fill missing months
    with zeros, and return the frame with a moving weighted-rating column
    named after the bar."""
    grouped = group_ratings_by_date(df[df[bar_field] == bar_name], date_field, rating_field)
    month_range = expand_dates(grouped)
    placeholder = pd.DataFrame({'bar': len(month_range) * [bar_name]}, index=month_range)
    result = pd.concat([placeholder, grouped], axis=1).fillna(0)
    result.sort_index(ascending=False, inplace=True)
    result[bar_name] = moving_weighted_ratings(result)
    return result
# Run the pipeline for every bar and merge each bar's moving weighted-rating
# series into one wide frame (one column per bar).
bar_moving_weighted_ratings(df_ta_bar_reviews, 'bar', '230_Fifth', 'first_of_month', 'rating').head(10)
for b in df_ta_bar_reviews.bar.unique():
    df_bar_moving_weighted_ratings = bar_moving_weighted_ratings(df_ta_bar_reviews, 'bar', b, 'first_of_month', 'rating')
    print(b, '\t', df_bar_moving_weighted_ratings[b][0])
    df_all_dates = pd.concat([df_all_dates, df_bar_moving_weighted_ratings[b]], axis=1).fillna(0)
df_all_dates.sort_index(ascending=False, inplace=True)
df_all_dates.head(20)
# df_all_dates.to_excel('ta_bar_reviews_weighted_moving_avg.xlsx')
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
need to concat each bar on entire date range first and then work backwards in ratings, this will remove sudden zeroes in June
|
def bar_moving_weighted_ratings_macro_date_range(df, bar_field, bar_name, date_field, rating_field):
    """Like bar_moving_weighted_ratings, but expands the bar onto the date
    range of the WHOLE dataset, so every bar shares one month axis and no bar
    suddenly drops to zero where its own data starts or ends early."""
    grouped = group_ratings_by_date(df[df[bar_field] == bar_name], date_field, rating_field)
    # Derive the month range from all bars, not just this one.
    macro_grouped = group_ratings_by_date(df, date_field, rating_field)
    month_range = expand_dates(macro_grouped)
    placeholder = pd.DataFrame({'bar': len(month_range) * [bar_name]}, index=month_range)
    result = pd.concat([placeholder, grouped], axis=1).fillna(0)
    result.sort_index(ascending=False, inplace=True)
    result[bar_name] = moving_weighted_ratings(result)
    return result
# Re-run the per-bar merge using the shared (macro) date range, then plot one bar.
bar_moving_weighted_ratings_macro_date_range(df_ta_bar_reviews, 'bar', 'Earl_s_Beer_and_Cheese', 'first_of_month', 'rating').head(10)
all_date_series = expand_dates(df_ta_bar_reviews, False, 'first_of_month')
df_all_dates = pd.DataFrame({'p_holder': np.zeros(len(all_date_series))}, index=all_date_series)
df_all_dates.sort_index(ascending=False, inplace=True)
# df_all_dates.head()
for b in df_ta_bar_reviews.bar.unique():
    df_bar_moving_weighted_ratings = bar_moving_weighted_ratings_macro_date_range(df_ta_bar_reviews, 'bar', b, 'first_of_month', 'rating')
    print(b, '\t', df_bar_moving_weighted_ratings[b][0])
    df_all_dates = pd.concat([df_all_dates, df_bar_moving_weighted_ratings[b]], axis=1).fillna(0)
df_all_dates.sort_index(ascending=False, inplace=True)
df_all_dates.head()
# df_all_dates.to_excel('ta_bar_reviews_weighted_moving_avg_macro_date_range.xlsx')
df_all_dates.Broome_Street_Bar.plot()
|
code/reseen/ta_reviews.ipynb
|
endlesspint8/endlesspint8.github.io
|
mit
|
Loading data from a CSV
The Table is the basic class in agate. To create a table from a CSV we use the Table.from_csv class method:
|
# Load the CSV; agate infers each column's type with a TypeTester.
exonerations = agate.Table.from_csv('examples/realdata/exonerations-20150828.csv')
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
With no other arguments specified, agate will automatically create an instance of TypeTester and use it to figure out the type of each column. TypeTester is a "best guess" approach to determining the kinds of data in your table. It can guess wrong. In that case you can create a TypeTester manually and use the force argument to override its guess for a specific column:
|
# Override the automatic guess for one column: force false_evidence to Boolean.
tester = agate.TypeTester(force={
    'false_evidence': agate.Boolean()
})
exonerations = agate.Table.from_csv('examples/realdata/exonerations-20150828.csv', column_types=tester)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
If you already know the types of your data you may wish to skip the TypeTester entirely. You may pass sequences of column names and column types to Table.from_csv as the column_names and column_types arguments, respectively.
For larger datasets the TypeTester can be slow to evaluate the data. In that case you can specify a limit argument to restrict the amount of data it will use to infer types:
|
# Sample only the first 100 rows when inferring column types (faster on big files).
tester = agate.TypeTester(limit=100)
exonerations = agate.Table.from_csv('examples/realdata/exonerations-20150828.csv', column_types=tester)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
The dataset we are using in this tutorial is simple enough that we can rely on the built-in TypeTester to guess quickly and accurately.
Note: agate's CSV reader and writer support unicode and other encodings for both Python 2 and Python 3. Try using them as a drop-in replacement for Python's builtin module: from agate import csv.
Note: agate also has Table.from_json for creating tables from JSON data.
Describing the table
If you're working with new data, or you just need a refresher, you may want to review what columns are in the table. You can do this with the Table.print_structure method or by just calling print on the table:
|
# Printing a table shows its column names and inferred types.
print(exonerations)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Navigating table data
agate goes to great pains to make accessing the data in your tables work seamlessly for a wide variety of use-cases. Access by both Column and Row is supported, via the Table.columns and Table.rows attributes respectively.
All four of these objects are examples of .MappedSequence, the foundational type that underlies much of agate's functionality. A MappedSequence functions very similar to a standard Python dict, with a few important exceptions:
Data may be accessed either by numeric index (e.g. column number) or by a non-integer key (e.g. column name).
Items are ordered, just like an instance of collections.OrderedDict.
Iterating over the sequence returns its values, rather than its keys.
To demonstrate the first point, these two lines are both valid ways of getting the first column in the exonerations table:
|
# Columns are addressable by name or by position — these two are equivalent.
exonerations.columns['last_name']
exonerations.columns[0]
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
In the same way, rows can be accessed either by numeric index or by an optional, unique "row name" specified when the table is created. In this tutorial we won't use row names, but here is an example of how they work:
|
# row_names builds a "Last, First" label per row; rows can then be fetched by
# numeric index or by that label.
exonerations = agate.Table.from_csv('examples/realdata/exonerations-20150828.csv', row_names=lambda r: '%(last_name)s, %(first_name)s' % (r))
exonerations.rows[0]
exonerations.rows['Abbitt, Joseph Lamont']
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
In this case we create our row names using a lambda function that takes a row and returns a unique identifier. If your data has a unique column, you can also just pass the column name. (For example, a column of USPS abbreviations or FIPS codes.) Note, however, that your row names can never be int, because that is reserved for indexing by numeric order. (A decimal.Decimal or stringified integer is just fine.)
Once you've got a specific row, you can then access its individual values (cells, in spreadsheet-speak) either by numeric index or column name:
|
# Within a row, cells are addressable by position or by column name.
row = exonerations.rows[0]
row[0]
row['last_name']
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
And the same goes for columns, which can be indexed numerically or by row name (if one has been setup):
|
# Within a column, cells are addressable by position or by row name.
column = exonerations.columns['crime']
column[0]
column['Abbitt, Joseph Lamont']
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
For any instance of .MappedSequence, iteration returns values, in order. Here we print only the first ten:
|
# Iterating a MappedSequence yields its values in order; slice to the first ten.
for row in exonerations.rows[:10]:
    print(row['last_name'])
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
To summarize, the four most common data structures in agate (Column, Row, Table.columns and Table.rows) are all instances of MappedSequence and therefore all behave in a uniform way. This is also true of TableSet, which will discuss later on.
Aggregating column data
With the basics out of the way, let's do some actual analysis. Analysis begins with questions, so let's ask some.
Question: How many exonerations involved a false confession?
Answering this question involves counting the number of True values in the false_confession column. When we created the table we specified that the data in this column contained Boolean data. Because of this, agate has taken care of coercing the original text data from the CSV into Python's True and False values.
We'll answer the question using an instance of Count which is a type of Aggregation. Aggregations are used to perform "column-wise" calculations. That is, they derive a new single value from the contents of a column. The Count aggregation can count either all values in a column, or how many times a particular value appears.
An Aggregation is applied to a table using Table.aggregate.
It sounds complicated, but it's really simple. Putting it all together looks like this:
|
# Count rows where false_confession is True.
exonerations.aggregate(agate.Count('false_confession', True))
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Let's look at another example, this time using a numerical aggregation.
Question: What was the median age of exonerated individuals at time of arrest?
|
# Median age at arrest; null ages are excluded (and trigger a warning).
exonerations.aggregate(agate.Median('age'))
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
The answer to our question is "26 years old", however, as the warnings indicate, not every exonerated individual in the data has a value for the age column. The Median statistical operation has no standard way of accounting for null values, so it leaves them out of the calculation.
Question: How many individuals do not have an age specified in the data?
Now that we know there are null values in the age column, we might worry about our sample size. What if most of the rows don't have an age?
|
# Count rows whose age is null.
exonerations.aggregate(agate.Count('age', None))
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Only nine rows in this dataset don't have age, so it's certainly still useful to compute a median. However, we might still want to filter those rows out so we could have a consistent sample for all of our calculations. In the next section you'll learn how to do just that.
Different aggregations can be applied depending on the type of data in each column. If none of the provided aggregations suit your needs you can use Summary to apply an arbitrary function to a column. If that still doesn't suit your needs you can always create your own aggregation from scratch by subclassing Aggregation.
Selecting and filtering data
So what if those rows with no age were going to flummox our analysis? Agate's Table class provides a full suite of SQL-like operations including Table.select for grabbing specific columns, Table.where for selecting particular rows and Table.group_by for grouping rows by common values.
Let's use Table.where to filter our exonerations table to only those individuals that have an age specified.
|
# Keep only rows that have an age; where() returns a NEW table, the original is unchanged.
with_age = exonerations.where(lambda row: row['age'] is not None)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
You'll notice we provide a lambda function to the Table.where. This function is applied to each row and if it returns True, then the row is included in the output table.
A crucial thing to understand about these table methods is that they return new tables. In our example above exonerations was a Table instance and we applied Table.where, so with_age is a new, different Table. The tables themselves can't be changed. You can create new tables with these methods, but you can't modify them in-place. (If this seems weird, just trust me. There are lots of good computer science-y reasons to do it this way.)
We can verify this did what we expected by counting the rows in the original table and rows in the new table:
|
# Row-count difference confirms how many rows the filter removed.
len(exonerations.rows) - len(with_age.rows)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Nine rows were removed, which is the number of nulls we had already identified were in the column.
Now if we calculate the median age of these individuals, we don't see the warning anymore.
|
# Median over the filtered table — no null warning this time.
with_age.aggregate(agate.Median('age'))
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Computing new columns
In addition to "column-wise" aggregations there are also "row-wise" computations. Computations go through a Table row-by-row and derive a new column using the existing data. To perform row computations in agate we use subclasses of Computation.
When one or more instances of Computation are applied with the Table.compute method, a new table is created with additional columns.
Question: How long did individuals remain in prison before being exonerated?
To answer this question we will apply the Change computation to the convicted and exonerated columns. Each of these columns contains the individual's age at the time of that event. All that Change does is compute the difference between two numbers. (In this case each of these columns contain a Number, but this will also work with Date or DateTime.)
|
# Derive years_in_prison as exonerated - convicted, then take its median.
with_years_in_prison = exonerations.compute([
    ('years_in_prison', agate.Change('convicted', 'exonerated'))
])
with_years_in_prison.aggregate(agate.Median('years_in_prison'))
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
The median number of years an exonerated individual spent in prison was 8 years.
Sometimes, the built-in computations, such as Change won't suffice. I mentioned before that you could perform arbitrary column-wise aggregations using Summary. You can do the same thing for row-wise computations using Formula. This is somewhat analogous to Excel's cell formulas.
For example, this code will create a full_name column from the first_name and last_name columns in the data:
|
# Formula applies an arbitrary per-row function, like a spreadsheet formula.
full_names = exonerations.compute([
    ('full_name', agate.Formula(agate.Text(), lambda row: '%(first_name)s %(last_name)s' % row))
])
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
For efficiency's sake, agate allows you to perform several computations at once (though their results can't depend on one another):
|
# Several computations can run in one compute() call; their results are independent.
with_computations = exonerations.compute([
    ('full_name', agate.Formula(agate.Text(), lambda row: '%(first_name)s %(last_name)s' % row)),
    ('years_in_prison', agate.Change('convicted', 'exonerated'))
])
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
You can also compute new columns to clean up your raw data. In the initial data, the state column has some values with a 'F-' prefix on the state abbreviation. Cases with that prefix are federal cases as opposed to state prosecutions. To make the data easier to use, we can create a new federal column to tag federal cases and clean up the original state column:
|
# Tag federal cases (an 'F-' prefix on the state) and strip the prefix;
# replace=True overwrites the existing 'state' column in the new table.
clean_state_data = exonerations.compute([
    ('federal', agate.Formula(agate.Boolean(), lambda row: row['state'].startswith('F-'))),
    ('state', agate.Formula(agate.Text(), lambda row: row['state'][2:] if row['state'].startswith('F-') else row['state']))
], replace=True)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
We add the replace argument to our compute method to replace the state column in place.
If Formula is not flexible enough (for instance, if you needed to compute a new value based on the distribution of data in a column) you can always implement your own subclass of Computation. See the API documentation for [computations](https://agate.readthedocs.io/en/latest/api/computations.html#module-agate.computations) to see all of the supported ways to compute new data.
Sorting and slicing
Question: Who are the ten exonerated individuals who were youngest at the time they were arrested?
Remembering that methods of tables return tables, we will use Table.order_by to sort our table:
|
# order_by returns a new table sorted by age, ascending.
sorted_by_age = exonerations.order_by('age')
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
We can then use Table.limit get only the first ten rows of the data.
|
# limit() keeps just the first ten rows of the sorted table.
youngest_ten = sorted_by_age.limit(10)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Now let's use Table.print_table to help us pretty-print the results in a way we can easily review:
|
# Pretty-print, capping the number of columns shown at seven.
youngest_ten.print_table(max_columns=7)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
If you find it impossible to believe that an eleven year-old was convicted of murder, I encourage you to read the Registry's description of the case.
Note: In the previous example we could have omitted the Table.limit and passed a max_rows=10 to Table.print_table instead. In this case they accomplish exactly the same goal.
What if we were more curious about the distribution of ages, rather than the highest or lowest? agate includes the Table.pivot and Table.bins methods for counting values individually or by ranges. Let's try binning the ages. Then, instead of using Table.print_table, we'll use Table.print_bars to generate a simple, text bar chart.
|
# Bin ages into 10 buckets spanning 0-100 and draw an 80-character text bar chart.
binned_ages = exonerations.bins('age', 10, 0, 100)
binned_ages.print_bars('age', 'Count', width=80)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Notice that we specify we want :code:10 bins spanning the range :code:0 to :code:100. If these values are omitted agate will attempt to infer good defaults. We also specify that we want our bar chart to span a width of :code:80 characters. This can be adjusted to a suitable width for your terminal or document.
Note: If you use a monospaced font, such as Courier, you can copy and paste agate bar charts into emails or documents. No screenshots required.
Grouping and aggregating
Question: Which state has seen the most exonerations?
This question can't be answered by operating on a single column. What we need is the equivalent of SQL's GROUP BY. agate supports a full set of SQL-like operations on tables. Unlike SQL, agate breaks grouping and aggregation into two discrete steps.
First, we use Table.group_by to group the data by state.
|
# group_by splits the table into a TableSet containing one table per state.
by_state = clean_state_data.group_by('state')
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
This takes our original Table and groups it into a TableSet, which contains one table per state. As mentioned much earlier in this tutorial, TableSets are instances of MappedSequence. That means they work very much like Column and Row.
Now we need to aggregate the total for each state. This works in a very similar way to how it did when we were aggregating columns of a single table, except that we'll use the Count aggregation to count the total number of rows in each group.
|
# Count the rows in each state group, then sort descending and show the top five.
state_totals = by_state.aggregate([
    ('count', agate.Count())
])
sorted_totals = state_totals.order_by('count', reverse=True)
sorted_totals.print_table(max_rows=5)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
You'll notice we pass a sequence of tuples to TableSet.aggregate. Each one includes two elements. The first is the new column name being created. The second is an instance of some Aggregation. Unsurprisingly, in this case the results appear to be roughly proportional to population.
Question: What state has the longest median time in prison prior to exoneration?
This is a much more complicated question that's going to pull together a lot of the features we've been using. We'll repeat the computations we applied before, but this time we're going to roll those computations up in state-by-state groups and then take the [Median](https://agate.readthedocs.io/en/latest/api/aggregations.html#agate.Median) of each group. Then we'll sort the data and see where people have been stuck in prison the longest.
|
# Recompute years_in_prison, group by state, and take each group's count and
# median years in prison; show the five states with the longest medians.
with_years_in_prison = exonerations.compute([
    ('years_in_prison', agate.Change('convicted', 'exonerated'))
])
state_totals = with_years_in_prison.group_by('state')
medians = state_totals.aggregate([
    ('count', agate.Count()),
    ('median_years_in_prison', agate.Median('years_in_prison'))
])
sorted_medians = medians.order_by('median_years_in_prison', reverse=True)
sorted_medians.print_table(max_rows=5)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
DC? Nebraska? What accounts for these states having the longest times in prison before exoneration? I have no idea! Given that the group sizes are small, it would probably be wise to look for outliers.
As with [Table.aggregate](https://agate.readthedocs.io/en/latest/api/table.html#agate.Table.aggregate) and Table.compute, the TableSet.aggregate method takes a list of aggregations to perform. You can aggregate as many columns as you like in a single step and they will all appear in the output table.
Multi-dimensional aggregation
I've already shown you that you can use TableSet to group instances of Table. However, you can also use a TableSet to group other TableSets. To put that another way, instances of TableSet can be nested.
The key to nesting data in this way is to use TableSet.group_by. This is one of many methods that can be called on a TableSet, which will then be applied to all the tables it contains. In the last section we used Table.group_by to split data up into a group of tables. By calling TableSet.group_by, which essentially called group_by on each table and collect the results. This can be pretty hard to wrap your head around, so let's look at a concrete example.
Question: Is there a collective relationship between race, age and time spent in prison prior to exoneration?
I'm not going to explain every stage of this analysis as most of it repeats patterns used previously. The key part to look for is the two separate uses of group_by:
|
# Two-level grouping: TableSet.group_by subdivides each race group by age decade.
# Filters rows without age data
only_with_age = with_years_in_prison.where(
    lambda r: r['age'] is not None
)
# Group by race
race_groups = only_with_age.group_by('race')
# Sub-group by age cohorts (20s, 30s, etc.)
race_and_age_groups = race_groups.group_by(
    lambda r: '%i0s' % (r['age'] // 10),
    key_name='age_group'
)
# Aggregate medians for each group
medians = race_and_age_groups.aggregate([
    ('count', agate.Count()),
    ('median_years_in_prison', agate.Median('years_in_prison'))
])
# Sort the results
sorted_groups = medians.order_by('median_years_in_prison', reverse=True)
# Print out the results
sorted_groups.print_table(max_rows=10)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Exploratory charting
Beginning with version 1.5.0, agate includes the pure-Python SVG charting library leather. Leather allows you to generate "good enough" charts with as little as one line of code. It's especially useful if you're working in a Jupyter Notebook, as the results will render inline.
There are currently four chart types supported: Table.bar_chart, Table.column_chart, Table.line_chart, and Table.scatterplot.
Let's create charts from a few slices of data we've made in this tutorial.
Exonerations by state
|
# Bar chart of exonerations per state; extra height keeps many bars readable.
sorted_totals.bar_chart('state', 'count', height=1000)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Leather will try to maintain a reasonable aspect ratio for chart. In this case the chart is too short to display correctly. We've used the height argument to make the chart a little taller.
Exonerations by age bracket
When creating a chart you may omit the column name arguments. If you do so the first and second columns in the table will be used. This is especially useful for charting the output of TableSet.aggregate or Table.bins.
|
# Column arguments omitted: the table's first two columns are charted.
binned_ages.bar_chart()
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Exonerations by year
|
# Count exonerations per year and draw a line chart over time.
by_year_exonerated = exonerations.group_by('exonerated')
counts = by_year_exonerated.aggregate([
    ('count', agate.Count())
])
counts.order_by('exonerated').line_chart('exonerated', 'count')
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Exonerations over time, for most commonly exonerated crimes
The real power of agate's exploratory charting comes when we want to compare different facets of data. With leather, agate can automatically render a chart for each group in a TableSet.
|
# Filter to crimes with at least 100 exonerations
top_crimes = exonerations.group_by('crime').having([
('count', agate.Count())
], lambda t: t['count'] > 100)
# Group by year of exoneration
by_year = top_crimes.group_by('exonerated')
# Count number of exonerations in each year
counts = by_year.aggregate([
('count', agate.Count())
])
# Group by crime
by_crime = counts.group_by('crime')
# Sort each group of exonerations by year and chart the results
by_crime.order_by('exonerated').line_chart('exonerated', 'count')
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Styling charts
As mentioned above, leather is designed for making "good enough" charts. You are never going to create a polished chart. However, sometimes you may want more control than agate offers through its own methods. You can take more control over how your charts are presented by using leather directly.
|
# Build the same state chart directly with leather for full control over
# the title, axis labels, and output size.
import leather
chart = leather.Chart('Total exonerations by state')
chart.add_y_axis(name='State')
chart.add_x_axis(name='Number of exonerations')
chart.add_bars(sorted_totals, x='count', y='state')
chart.to_svg(height=1000)
|
tutorial.ipynb
|
onyxfish/journalism
|
mit
|
Language Model Downloading
Download BERT
|
# Instantiate the project's BERT language-model wrapper.
bert_model = languagemodels.BERTModel()
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
Encoding Generation
Generate Encodings for MSR-VTT
|
# Precompute and cache BERT encodings for the MSR-VTT dataset's captions.
train.language_model.generate_and_cache_encodings(
    bert_model, datasets.msrvtt_dataset)
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
Training
Build Train Datasets
Initialize Models
Compile Encoders
Fit Model
Test Model
Datasets Generation
|
# Expert feature extractors to use, and the train/valid/test datasets built
# from them. NOTE(review): the list order presumably must line up with the
# per-expert netvlad flags/shapes passed to VideoEncoder below — confirm.
experts_used = [
    experts.i3d,
    experts.r2p1d,
    experts.resnext,
    experts.senet,
    experts.speech_expert,
    experts.ocr_expert,
    experts.audio_expert,
    experts.densenet,
    experts.face_expert]
train_ds, valid_ds, test_ds = (
    train.encoder_datasets.generate_language_model_fine_tuning_datasets(
        bert_model, datasets.msrvtt_dataset, experts_used))
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
Model Initialization
|
class MishLayer(tf.keras.layers.Layer):
    """Keras layer that applies the notebook's `mish` activation."""
    def call(self, inputs):
        # NOTE(review): `mish` is defined in an earlier cell -- confirm it
        # is x * tanh(softplus(x)) as the name suggests.
        return mish(inputs)
# Warm-up call so `mish` is traced/initialised before model construction.
mish(tf.Variable([1.0]))
# Text tower: aggregates 768-D BERT token embeddings with NetVLAD
# (28 clusters + 1 ghost) into one 512-D embedding per expert.
text_encoder = models.components.TextEncoder(
    len(experts_used),
    num_netvlad_clusters=28,
    ghost_clusters=1,
    language_model_dimensionality=768,
    encoded_expert_dimensionality=512,
    residual_cls_token=True,
)
# Video tower: NetVLAD only for the variable-length experts
# (speech/OCR/audio); the flag/shape lists follow `experts_used` order.
video_encoder = models.components.VideoEncoder(
    num_experts=len(experts_used),
    experts_use_netvlad=[False, False, False, False, True, True, True, False, False],
    experts_netvlad_shape=[None, None, None, None, 19, 43, 8, None, None],
    expert_aggregated_size=512,
    encoded_expert_dimensionality=512,
    g_mlp_layers=3,
    h_mlp_layers=0,
    make_activation_layer=MishLayer)
# Joint encoder that fine-tunes BERT together with both towers.
encoder = models.encoder.EncoderForLanguageModelTuning(
    video_encoder,
    text_encoder,
    0.05,            # presumably the ranking-loss margin -- confirm
    [1, 5, 10, 50],  # presumably recall@k thresholds -- confirm
    20,              # presumably captions per video in MSR-VTT -- confirm
    bert_model.model,
    64)
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
Encoder Compilation
|
def build_optimizer(lr=0.001):
    """Return an Adam optimizer whose LR decays 5% every 1000 steps (staircase)."""
    learning_rate_scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=lr,
        decay_steps=1000,
        decay_rate=0.95,
        staircase=True)
    return tf.keras.optimizers.Adam(learning_rate_scheduler)
# Compile with a small fine-tuning LR and the bidirectional max-margin
# ranking loss used for cross-modal retrieval.
encoder.compile(build_optimizer(5e-5), metrics.loss.bidirectional_max_margin_ranking_loss)
# Shuffle/batch the training pairs; drop_remainder keeps batch shapes static.
train_ds_prepared = (train_ds
                     .shuffle(7000)
                     .batch(32, drop_remainder=True)
                     .prefetch(tf.data.experimental.AUTOTUNE))
# One big validation batch (presumably 497 videos x 20 captions -- confirm),
# cached after the first pass.
valid_ds_prepared = (valid_ds
                     .prefetch(tf.data.experimental.AUTOTUNE)
                     .batch(497 * 20, drop_remainder=True)
                     .cache())
# Unfreeze everything: BERT and both towers are fine-tuned jointly.
encoder.language_model.trainable = True
encoder.video_encoder.trainable = True
encoder.text_encoder.trainable = True
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
Model fitting
|
# Fine-tune for 250 epochs.  Validation was disabled in the original run;
# re-enable `validation_data` to monitor retrieval metrics per epoch.
encoder.fit(
    train_ds_prepared,
    #validation_data=valid_ds_prepared,
    epochs=250,
)
|
videoretrieval/Demo notebook BERT.ipynb
|
googleinterns/via-content-understanding
|
apache-2.0
|
2. Load each array of Ps and Ts
|
import numpy as np
# Object arrays (dtype=np.ndarray) so each slot can hold an index array of
# a different length -- one recording per processed file.
Ps = np.zeros(Nfiles, dtype=np.ndarray)
Ts = np.zeros(Nfiles, dtype=np.ndarray)
for n in range(Nfiles):
    # Peak (Ps) and trough (Ts) sample indices computed on OSG for file n.
    Ps[n] = np.load('C:/gh/data/example/lfp_set_PsTs/' + str(n) + '/out/Ps_data.npy')
    Ts[n] = np.load('C:/gh/data/example/lfp_set_PsTs/' + str(n) + '/out/Ts_data.npy')
|
demo_OSG_python/demo_osg_python - plot output from OSG.ipynb
|
srcole/qwm
|
mit
|
3. Load signals
|
# Load the raw LFP traces that the peaks/troughs were computed from.
lfps = np.zeros(Nfiles, dtype=np.ndarray)
for n in range(Nfiles):
    if n == 0:
        # NOTE(review): file index 0 is deliberately read from '10.npy' --
        # looks like a dataset numbering quirk; confirm it matches how the
        # OSG jobs were numbered.
        lfps[n] = np.load('C:/gh/data/example/lfp_set/' + str(10) + '.npy')
    else:
        lfps[n] = np.load('C:/gh/data/example/lfp_set/' + str(n) + '.npy')
|
demo_OSG_python/demo_osg_python - plot output from OSG.ipynb
|
srcole/qwm
|
mit
|
4. Plot peaks and troughs on top of signals
|
import matplotlib.pyplot as plt
%matplotlib inline
# One subplot per file: raw trace in black, detected peaks (blue dots) and
# troughs (red dots) marked at their sample indices.
plt.figure(figsize=(10,10))
for n in range(Nfiles):
    plt.subplot(Nfiles, 1, n+1)
    plt.plot(lfps[n], 'k')
    plt.plot(Ps[n], lfps[n][Ps[n]], 'bo')
    plt.plot(Ts[n], lfps[n][Ts[n]], 'ro')
    # Only the bottom subplot gets an x-axis label; hide ticks elsewhere.
    if n == Nfiles-1:
        plt.xlabel('Time (ms)')
    else:
        plt.xticks([])
    plt.ylim((-3000,3000))
    plt.yticks([-3000,0,3000])
    if n == 0:
        plt.ylabel('Voltage (uV)')
|
demo_OSG_python/demo_osg_python - plot output from OSG.ipynb
|
srcole/qwm
|
mit
|
Use a set of standard plots
A graphical representation can always be on hand
|
# Parse the boost-15% ftrace capture, registering the scheduler/cpufreq
# events to analyse; `plots_prefix` namespaces the saved figures.
trace = Trace(platform, boost15_trace,
              ["sched_switch",
               "sched_overutilized",
               "sched_load_avg_cpu",
               "sched_load_avg_task",
               "sched_boost_cpu",
               "sched_boost_task",
               "cpu_frequency",
               "cpu_capacity",
              ],
              plots_prefix='boost15_'
              )
|
ipynb/tutorial/UseCaseExamples_SchedTuneAnalysis.ipynb
|
arnoldlu/lisa
|
apache-2.0
|
set the stage for data visualization
|
# Disable blocking interactive plot windows and apply seaborn's
# whitegrid theme with color codes ('b', 'g', ...) remapped to the palette.
plt.interactive(False)
sns.set(style="whitegrid", color_codes=True)
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Load the dataset and make a copy of it
|
# low_memory=False makes pandas read the file in a single pass so column
# dtypes are inferred consistently (avoids mixed-dtype warnings).
data = pd.read_csv("data-taarifa.csv", low_memory=False)
# Keep the raw frame intact; all cleaning happens on the copy.
sub1 = data.copy()
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
List all the variables in the dataset
|
list(sub1.columns.values)
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Data standardisation- Lowercase all the variable names
|
# Normalise every column name to lowercase using pandas' vectorised
# string accessor (same result as lower-casing each name in a loop).
sub1.columns = sub1.columns.str.lower()
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
A quick peek at the dataset
|
sub1.head(5)
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Problem challenge # 1
Missing value treatment
A majority of the variables in this dataset are categorical. Therefore, I impute the missing values with the mode (replacement of missing values by the most frequently occurring value).
|
## Fill every column's missing values with that column's most frequent
## (mode) value.  Guard against all-NaN columns, where value_counts() is
## empty and `.index[0]` would raise IndexError.
sub1 = sub1.apply(
    lambda x: x.fillna(x.value_counts().index[0]) if x.notna().any() else x)
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Problem challenge # 2
Using one hot encoding to convert categorical variables of interest to dummy continuous numbers
For reference see this Kaggle post by Mark, https://www.kaggle.com/c/titanic/forums/t/5379/handling-categorical-data-with-sklearn
|
from sklearn import preprocessing

# Label-encode each categorical variable of interest in place, mapping its
# string levels to integer codes.  A loop with bracket indexing replaces
# the original twelve near-identical attribute assignments (attribute
# assignment on a DataFrame is not the supported way to write a column).
le_enc = preprocessing.LabelEncoder()
categorical_cols = [
    'permit', 'extraction_type_class', 'payment_type', 'quality_group',
    'quantity_group', 'waterpoint_type_group', 'water_quality',
    'source_class', 'status_group',
]
for col in categorical_cols:
    sub1[col] = le_enc.fit_transform(sub1[col])
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Another quick peek at the dataset. Notice, variables of interest like 'permit', 'extraction_type', 'payment_type', 'quality_group' have now been assigned dummy codes as required
|
sub1.head(5)
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
explanatory variable is also known as the 'Independent variable' and response variable is also known as the 'dependent variable'
Variables of interest for this study are;
status_group,extraction_type_class,payment_type,quality_group,quantity_group,waterpoint_type_group,source_class,permit,water_quality
Correlational Analysis for variables of interest
From a previous post(), I want to determine if there is any relationship between water quality, water quantity and water resource characterstics. So I use the regression methods as shown below to determine this.
|
print ("OLS regresssion model for the association between water pump condition status and quality of water in it")
reg1=smf.ols('status_group~permit',data=sub1).fit()
print (reg1.summary())
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Status group is the response or dependent variable and permit is the independent variable.
The number of observations show the no. of observations that had valid data and thus were included in the analysis.
The F-statistic is 66.57 and the p value is very small (Prob (F-statistic))= 3.44e-16 considerably less than our alpha level
of 0.05 which tell us that we can reject the null hypothesis and conclude that permit is significantly associated with water pump status group.
The linear regression equation Y = b0 + b1X where X is the explanatory variable or the independent variable and Y is the response or the dependent variable.--(EQN 1)
Note: EQN 1 is significant because it can also help us in prediction of Y. Next, we look at the parameter estimates or the coeffecients or beta weights .
Thus the coefficient for permit is -0.0697 and the intercept is 0.8903.
Then the best-fit line for permit is status_group = 0.89 - 0.07*permit -- (EQN 2)
In the above example, suppose we are told that 80% of people in a region have valid water permits; can we predict the status of the water pump device?
Yes: plugging permit = 80 into EQN 2 with b0 = 0.89 and b1 = -0.07
gives y(hat) = 0.89 - 0.07*80 = -4.71. Note that permit is a label-encoded 0/1 indicator in this model, so substituting a percentage is only an illustration of how EQN 2 would be used for prediction.
Also note the P>|t| value is very small for permit. It is 0.0 and that the R-squared value is 0.001.
We now know that this model accounts for 0.001% variability that we see in our response variable permit.
|
# Add more predictors stepwise to check whether significance survives.
# NOTE: each fit rebinds `reg1`, discarding the previous model object.
print("OLS regression model for the association between status_group and other variables of interest")
reg1 = smf.ols('status_group~quantity_group+extraction_type_class+waterpoint_type_group', data=sub1).fit()
print(reg1.summary())

# Full model with every variable of interest ("regresssion" typo fixed).
print("OLS regression model for the association between water pump status group and all variables of interest")
reg1 = smf.ols('status_group~extraction_type_class+payment_type+quality_group+quantity_group+waterpoint_type_group+source_class+permit+water_quality', data=sub1).fit()
print(reg1.summary())

# Second-order polynomial fit of water quality against pump status.
scat1 = sns.regplot(x="status_group", y="quality_group", order=2, scatter=True, data=sub1)
plt.xlabel('Water pump status')
plt.ylabel('quality of the water')
plt.title('Scatterplot for the association between water pump status and water quality')
plt.show()
|
scripts/general/Taarifa_Regression.ipynb
|
duttashi/Data-Analysis-Visualization
|
mit
|
Learn a projection using SEF
First, we have to create a linear SEF object. Note that we have to supply the number of input dimensions, as well as the target dimensionality. We can also specify the learning rate and the regularizer weight in the class constructor (usually the default values work just fine).
|
import sef_dr
# Linear SEF: learn a linear projection of the input features down to 9
# dimensions (the same target dimensionality LDA uses below).
linear_sef = sef_dr.LinearSEF(input_dimensionality=x_train.shape[1], output_dimensionality=9)
# Move the model to the GPU (comment this out if a CUDA-enabled GPU is not available)
linear_sef.cuda()
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
Then, we fit the projection. We have to supply the type of the target similarity matrix (or a function that creates the target similarity matrix on-the-fly).
|
loss = linear_sef.fit(data=x_train[:5000], target_labels=y_train[:5000], target='supervised', epochs=100, regularizer_weight=0.001, learning_rate=0.0001, batch_size=128,)
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
Let's examine the loss function:
|
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# Plot the per-epoch optimisation loss to check convergence.
plt.plot(np.arange(loss.shape[0]), loss)
plt.title('Loss during optimization')
plt.show()
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
The loss is practically the same after the 20th iteration, so a solution has been found. Let's evaluate it!
First, transform the data:
|
# Project the full train/test sets through the learned linear SEF map.
train_data = linear_sef.transform(x_train)
test_data = linear_sef.transform(x_test)
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
Then, use an SVM to evaluate the solution:
|
from sklearn import svm
# GridSearchCV moved from sklearn.grid_search to sklearn.model_selection
# in 0.18; the old module was removed in 0.20.  Try the modern location
# first so the cell keeps working on current installs.
try:
    from sklearn.model_selection import GridSearchCV
except ImportError:
    from sklearn.grid_search import GridSearchCV
from sklearn.neighbors import NearestCentroid
from sklearn.preprocessing import MinMaxScaler

# Scale features to [0, 1]; fit the scaler on train only so no test
# statistics leak into the transform.
scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_data)
test_data = scaler.transform(test_data)

# Grid-search a linear SVM over C with 3-fold CV on the first 5000 samples.
parameters = {'kernel': ['linear'], 'C': [0.01, 0.1, 1, 10, 100, 1000, 10000]}
model = svm.SVC(max_iter=10000)
clf = GridSearchCV(model, parameters, cv=3)
clf.fit(train_data[:5000], y_train[:5000])
# (fixed: the original "Αccuracy" started with a Greek capital alpha)
print("Linear SEF Accuracy = ", clf.score(test_data, y_test) * 100, "%")
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
Compare to LDA
The solution seems good enough, but let's compare it to the regular LDA method!
|
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

# Fit the LDA projection on the same 5000-sample subset used for SEF so
# the comparison is like-for-like (9 components, LDA's maximum here).
lda = LinearDiscriminantAnalysis(n_components=9)
lda.fit(x_train[:5000, :], y_train[:5000])

# Transform the full train/test sets into the learned 9-D space.
train_data = lda.transform(x_train)
test_data = lda.transform(x_test)

# Scale the data (fit on train only, as before).
scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_data)
test_data = scaler.transform(test_data)

# Same linear-SVM evaluation protocol as for the SEF projection.
parameters = {'kernel': ['linear'], 'C': [0.01, 0.1, 1, 10, 100, 1000, 10000]}
model = svm.SVC(max_iter=10000)
clf = GridSearchCV(model, parameters, cv=3)
clf.fit(train_data[:5000], y_train[:5000])
# (fixed: the original "Αccuracy" started with a Greek capital alpha)
print("SVM Accuracy = ", clf.score(test_data, y_test) * 100, "%")
|
tutorials/Supervised DR.ipynb
|
passalis/sef
|
mit
|
You should see about a 27% increase in speed.
In the interest of transparency, if you change the target to parallel instead of cuda, the compiler will target the multi-core CPU available on this instance and you will get similar performance to what you just got on the GPU. The reason for this is we're only porting a very small amount of computation to the GPU, and therefore not hiding the latency of transferring data around. If you decide to take the Python labs on nvidia.qwiklab.com, you will see how we can achieve much greater increases in performance of this algorithm by moving more computation to the GPU with both library calls and some CUDA code, and hiding the cost of transferring data.
CUDA
Programming for the GPU in a CUDA-enabled language is the most flexible of the three approaches. While CUDA was initially just a C compiler when it was first released, it has grown into the parallel computing platform for accessing the general purpose, massively parallel compute power of an NVIDIA GPU.
There is a growing list of languages that understand how to speak CUDA and target the GPU including but not limited to C, C++, Fortran, R, and Python. In this lab, you will write some CUDA code directly in Python. This code will be compiled using Continuum Analytics Numba compiler which contains CUDA Python support.
Task #4
This task does not require any modifications to get working and will be generating the Mandelbrot Set. It is designed to show you the speed-up you can get using CUDA Python to move computational intensive portions of code to the GPU.
Executing the below cell will run the same algorithm on first the GPU and then again on the CPU. Both of these examples are using code compiled from Python using the Numba compiler. The timing of the GPU includes all data transfers between the CPU memory and GPU memory in order to make a fair comparison. While it's not explicitly coded, the Numba compiler is able to recognize and handle the need for the gimage data to be transferred to the GPU before create_fractal_gpu is called, and back when it's complete. The cuda.synchronize is there to ensure the timing information is accurate.
Feel free to change the numIters variable to decrease or increase the number of iterations performed. In addition you can modify the fractal grid points (starting values of -2.0, 1.0, -1.0, 1.0) to change the area of the fractal processed. As you increase the number of iterations, you should see the gap in performance between the GPU and CPU increasing as the amount of computation hides the data transfer latency.
You will notice that the GPU version adds [griddim, blockdim] before the parameter list. These values control how the parallel work is spread across the GPU and will be described in more detail in the next task. You should run the next cell twice, the first time may be slower due to the one-time compilation of the create_fractal_* functions
|
from mymandel import *

# Maximum Mandelbrot iterations per pixel; raising it increases the
# compute-to-transfer ratio and widens the GPU/CPU performance gap.
numIters = 20

# --- GPU version first ---
gimage = np.zeros((1024, 1536), dtype=np.uint8)
blockdim = (32, 8)   # threads per block
griddim = (32, 16)   # blocks in the grid
with mytimer('Mandelbrot created on GPU'):
    create_fractal_gpu[griddim, blockdim](-2.0, 1.0, -1.0, 1.0, gimage, numIters)
    # Kernel launches are asynchronous: block here so the timer measures
    # the actual GPU work.  (The original referenced `cuda.synchronize`
    # without calling it, so nothing was actually synchronized.)
    cuda.synchronize()
imshow(gimage)
show()

# --- CPU version last ---
image = np.zeros_like(gimage)
with mytimer('Mandelbrot created on CPU'):
    create_fractal_cpu(-2.0, 1.0, -1.0, 1.0, image, numIters)
imshow(image)
show()
|
notes/nvidia_quicklabs/gpu_computing/Accelerated Computing.ipynb
|
rahul1990gupta/awesome-self-driving-car-resources
|
mit
|
You should see around a 9x speed increase when moving from the GPU to the CPU when using the original parameters.
If you are interested in seeing the rest of the code used in the above example, please execute the next cell. This is not a requirement for the lab, but you may find it insightful after you perform the next task. In addition, at the end of this lab, you are presented with a section on downloading the code for offline viewing - but be careful you don't run out of time!
|
%load mymandel.py
|
notes/nvidia_quicklabs/gpu_computing/Accelerated Computing.ipynb
|
rahul1990gupta/awesome-self-driving-car-resources
|
mit
|
Task 5 - Hello World
For this task, you get to try your hand and writing some CUDA Python code. We are going to be using the following concepts:
<code style="color:green">@cuda.autojit</code> - this decorator is used to tell the CUDA compiler that the function is to be compiled for the GPU. With autojit, the compiler will try and determine the type information of the variables being passed in. You can create your own signatures manually by using the jit decorator.
<code style="color:green">cuda.blockIdx.x</code> - this is a read-only variable that is defined for you. It is used within a GPU kernel to determine the ID of the block which is currently executing code. Since there will be many blocks running in parallel, we need this ID to help determine which chunk of data that particular block will work on.
<code style="color:green">cuda.threadIdx.x</code> - this is a read-only variable that is defined for you. It is used within a GPU kernel to determine the ID of the thread which is currently executing code in the active block.
<code style="color:green">myKernel[number_of_blocks, threads_per_block](...)</code> - this is the syntax used to launch a kernel on the GPU. Inside the list (the square brackets [...]), the first number is the total number of blocks we want to run on the GPU, and the second is the number of threads there are per block. It's possible, and in fact recommended, for one to schedule more blocks than the GPU can actively run in parallel. The system will just continue executing blocks until they have all completed. The following video addresses grids, blocks, and threads in more detail.
<div align="center"><iframe width="640" height="390" src="http://www.youtube.com/embed/KM-zbhyz9f4" frameborder="0" allowfullscreen></iframe></div>
Let's explore the above concepts by doing a simple "Hello World" example.
Most of the code in this example has already been written for you. Your task is to modify the single line in the hello function such that the data printed out at the end looks like:
[[0 0 0 0 0]]
What's happening is that all the threads in block 0 are writing the block ID into their respective place in the array. Remember that this function is being run in parallel by the threads in block 0, each with their own unique thread ID. Since we're launching a single block with 5 threads, the following is happening parallel:
data[0,0] = 0
data[0,1] = 0
data[0,2] = 0
data[0,3] = 0
data[0,4] = 0
If you get stuck, click on the link below the code to see the solution.
|
from numba import *
import numpy as np

@cuda.jit
def hello(data):
    # Solution to the exercise (the cell shipped with an unfillable
    # `data[ , ] =` placeholder, which is a syntax error): every thread in
    # block 0 writes its block's ID into its own slot, so the array of
    # ones becomes [[0 0 0 0 0]].
    data[cuda.blockIdx.x, cuda.threadIdx.x] = cuda.blockIdx.x

numBlocks = 1
threadsPerBlock = 5

data = np.ones((numBlocks, threadsPerBlock), dtype=np.uint8)

# Launch one block of five threads.
hello[numBlocks, threadsPerBlock](data)

# print(data) works on both Python 2 and 3; the bare `print data` did not.
print(data)
|
notes/nvidia_quicklabs/gpu_computing/Accelerated Computing.ipynb
|
rahul1990gupta/awesome-self-driving-car-resources
|
mit
|
Vertex client library: AutoML tabular classification model for online prediction
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_online.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex client library for Python to create tabular classification models and do online prediction using Google Cloud's AutoML.
Dataset
The dataset used for this tutorial is the Iris dataset from TensorFlow Datasets. This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of Iris flower species from a class of three species: setosa, virginica, or versicolor.
Objective
In this tutorial, you create an AutoML tabular classification model and deploy for online prediction from a Python script using the Vertex client library. You can alternatively create and deploy models using the gcloud command-line tool or online using the Google Cloud Console.
The steps performed include:
Create a Vertex Dataset resource.
Train the model.
View the model evaluation.
Deploy the Model resource to a serving Endpoint resource.
Make a prediction.
Undeploy the Model.
Costs
This tutorial uses billable components of Google Cloud (GCP):
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Installation
Install the latest version of Vertex client library.
|
import os
import sys

# Google Cloud Notebook instances require --user installs; elsewhere the
# flag is unnecessary (the sentinel file below only exists on GCN images).
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

! pip3 install -U google-cloud-aiplatform $USER_FLAG
|
notebooks/community/gapic/automl/showcase_automl_tabular_classification_online.ipynb
|
GoogleCloudPlatform/vertex-ai-samples
|
apache-2.0
|
Make a online prediction request
Now make an online prediction request to your deployed model.
Make test item
You will use synthetic data as a test data item. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
|
# Synthetic Iris measurements used as the online-prediction payload.
# Values are strings because AutoML tabular expects string-encoded features.
INSTANCE = {
    "petal_length": "1.4",
    "petal_width": "1.3",
    "sepal_length": "5.1",
    "sepal_width": "2.8",
}
|
notebooks/community/gapic/automl/showcase_automl_tabular_classification_online.ipynb
|
GoogleCloudPlatform/vertex-ai-samples
|
apache-2.0
|
Section 1
In this section we'll cover:
Using Jax
Computing derivatives with Jax
Gradient Descent (single variable)
Newton's Method (single variable)
Jax
Autodiff cookbook
Jax is a python library that provides the ability to differentiate many python
functions.
|
import jax
import jax.numpy as jnp
from jax import jit, grad, vmap, api, random, jacfwd, jacrev
from jax.experimental import optimizers, stax
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Basic autodiff example
Let's take the derivative of $y = f(x) = x^2$, which we know to be $f'(x) = 2x$.
|
def square(x):
    """Return the square of x (works on scalars and numpy arrays alike)."""
    squared = x * x
    return squared
# Compute the derivative with grad from Jax
dsquare = grad(square)
# Plot the function and the derivative on [0, 2.5).
domain = np.arange(0, 2.5, 0.1)
plt.plot(domain, square(domain), label="$y=x^2$")
# grad returns a scalar->scalar function, so evaluate it point-by-point.
plt.plot(domain, list(map(dsquare, domain)), label="$y'=\\frac{dy}{dx} = 2x$")
# Note can also use JAX's vmap: vmap(dsquare)(domain) instead of list(map(dsquare, domain))
plt.legend()
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Exercise
Write a function to compute the sigmoid
$$f(x) = \frac{1}{1 + e^{-x}}$$
using jnp.exp to compute $e^x$ -- need to use the Jax version of
numpy for autodiff to work.
Then verify that
$$ f'(x) = f(x) (1 - f(x))$$
For example, compare some explicit values and plot the differences between the derivative and $f(x) (1 - f(x))$.
|
## Your code here
# Compute the sigmoid
def sigmoid(x):
    # Exercise placeholder: implement 1 / (1 + e^{-x}) using jnp.exp.
    pass
# Compute the derivative (using grad)
# Compare derivative values to f(x) * (1 - f(x))

# @title Solution (double-click to show)
# Compute the sigmoid
def sigmoid(x):
    """Logistic sigmoid 1/(1+e^{-x}); jnp.exp keeps it jax-differentiable."""
    return 1. / (1. + jnp.exp(-x))
# Compute the derivative (using grad)
deriv = grad(sigmoid)
# Compare derivative values to f(x) * (1 - f(x)); the differences should
# sit at floating-point noise level across the whole grid.
xs = np.arange(-3, 3.1, 0.1)
ys = []
for x in xs:
    ys.append(deriv(x) - sigmoid(x) * (1. - sigmoid(x)))
plt.scatter(xs, ys)
plt.title("Differences between Jax derivative and $f(x)(1-f(x))$\nNote the scale modifier in the upper left")
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Single variable gradient descent examples
In this example we solve $x^2=x$, which we know has two solutions. Different
initial starting points yield convergence to different solutions, or
non-convergence to either solution at $x_0 = 1/2$.
We need to turn the problem into one of finding local extrema. So we consider
the function $f(x) = \left( x^2 - x\right)^2$, which is differentiable and
has local minimum at $x=0$ and $x=1$. We square so that the points where
$x^2=x$ are minima instead of zeros of a function like $g(x) = x^2 - x$.
Notice in the plot below that the function also has a local maximum at $x=1/2$,
which is centered between the two solutions. Intuitively, gradient descent
starting at $x_0=1/2$ will not move because there's no reason to favor either
local minimum.
Let's plot the function first:
|
def f(x):
    """Objective (x^2 - x)^2, minimised exactly where x^2 = x (x=0 or 1)."""
    residual = x**2 - x
    return residual**2
# Sample the objective on [-1, 2] and plot it: minima at x=0 and x=1,
# local maximum at x=1/2.
xs = np.arange(-1, 2.05, 0.05)
ys = f(xs)
plt.plot(xs, ys)
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Let's define a function to compute iterations of gradient descent.
$$\begin{eqnarray}
x_{n+1} &=& x_n - \alpha f'(x_n) \
\end{eqnarray}$$
|
def gradient_descent(dfunc, x0, iterations=100, alpha=0.1):
    """Iterate x <- x - alpha * dfunc(x) and return every iterate.

    `dfunc` is the derivative of the objective being minimised.  The
    returned list has iterations + 1 entries, starting with x0.
    """
    trajectory = [x0]
    current = x0
    for _ in range(iterations):
        current = current - alpha * dfunc(float(current))
        trajectory.append(current)
    return trajectory
# Let's try it on our function f now.
# Compute the derivative
df = grad(f)
# Try some different starting points.  x0 = 0.5 sits exactly on the local
# maximum where the gradient is zero, so that run never moves; 0.50001
# breaks the tie and descends to a minimum.
for x0 in [0.25, 0.5, 0.50001, 0.85]:
    xs = gradient_descent(df, x0, iterations=30, alpha=1.)
    plt.plot(range(len(xs)), xs, label=x0)
plt.xlabel("iterations")
plt.legend()
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Check your understanding: Explain what happened with each of the four curves in the plot.
Exercise: What happens if we decrease the learning rate $\alpha$?
Recreate the plot above using $\alpha=0.1$ instead.
|
## Your code here
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Example 2
In this example we use gradient descent to approximate $\sqrt{3}$. We use
the function $f(x) = \left(x^2 - 3\right)^2$ and construct a sequence converging
to the positive solution. In this case notice the impact of the learning rate
both on the time to convergence and whether the convergence is monotonic or
oscillatory. Larger learning rates such as $\alpha=1$ can cause divergence.
|
def f2(x):
    """Objective (x^2 - 3)^2, minimised exactly where x^2 = 3."""
    residual = x*x - 3
    return residual**2
df = grad(f2)
x0 = 2
# Two learning rates: the larger converges faster (possibly oscillating),
# the smaller is slower but monotone; per the text, alpha = 1 diverges.
for alpha in [0.08, 0.01]:
    xs = gradient_descent(df, x0, iterations=40, alpha=alpha)
    plt.plot(range(len(xs)), xs, label="$\\alpha = {}$".format(alpha))
plt.xlabel("iterations")
# Plot the correct value as a dashed reference line
sqrt3 = math.pow(3, 0.5)
n = len(xs)
plt.plot(range(n), [sqrt3]*n, label="$\\sqrt{3}$", linestyle="--")
plt.legend()
plt.show()
print("Sqrt(3) =", sqrt3)
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Exercise
Solve the equation $x = e^{-x}$, which does not have an easily obtainable solution algebraically.
The solution is approximately $x = 0.567$. Again note the impact of
the learning rate.
Use jnp.exp for the exponential function.
|
def f3(x):
    """Define the function f(x) = (x - e^(-x))^2."""
    ## Your code here (exercise placeholder; solution below)
    pass
# Compute the gradient
# Initial guess
x0 = 0.4
## Add code here for gradient descent using the functions above
## Plot the gradient descent values

#@title Solution (double click to expand)
def f3(x):
    """Squared residual (x - e^{-x})^2; its minimum solves x = e^{-x}."""
    return (x - jnp.exp(-x))**2
# Compute the gradient
df = grad(f3)
# Initial guess
x0 = 0.4
# Run descent at two learning rates and plot the iterate trajectories.
for alpha in [0.01, 0.1]:
    xs = gradient_descent(df, x0, iterations=50, alpha=alpha)
    plt.plot(range(len(xs)), xs, label="$\\alpha = {}$".format(alpha))
plt.xlabel("iterations")
plt.legend()
plt.show()
print("Final iteration:", xs[-1])
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Newton's method, single variable
We can use Newton's
method to find zeros of functions. Since local extrema occur at zeros of
the derivative, we can apply Newton's method to the first derivative to obtain
a second-order alternative to gradient descent.
$$\begin{eqnarray}
x_{n+1} &=& x_n - \alpha \frac{f'(x_n)}{f''(x_n)} \
\end{eqnarray}$$
|
def newtons_method(func, x0, iterations=100, alpha=1.):
    """Damped Newton iteration x <- x - alpha * func(x) / func'(x).

    `func` is differentiated with jax.grad, so it must be jax-traceable.
    Returns the full list of iterates, starting with x0.
    """
    dfunc = grad(func)
    trajectory = [x0]
    current = x0
    for _ in range(iterations):
        current = current - alpha * func(current) / dfunc(float(current))
        trajectory.append(current)
    return trajectory
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Let's repeat the example of finding the value of $\sqrt{3}$ with Newton's method and compare to gradient descent.
For small $\alpha$, gradient descent seems to perform better:
|
def f(x):
    # Objective whose minima sit at x = +/- sqrt(3).
    return (x**2 - 3)**2
# Let's make a function we can reuse
def compare_gradient_newton(func, x0, alpha=0.01, iterations=50):
    """Plot gradient-descent vs Newton iterates for `func` from the same x0/alpha."""
    # First derivative; newtons_method differentiates it again internally,
    # so the Newton update effectively uses f''.
    df = grad(func)
    # Compute Newton's method iterations
    xs = newtons_method(df, x0, alpha=alpha, iterations=iterations)
    # Compute gradient descent with same alpha
    xs2 = gradient_descent(df, x0, alpha=alpha, iterations=iterations)
    # Plot both trajectories against the iteration count
    plt.plot(range(len(xs2)), xs2, label="Gradient Descent")
    plt.plot(range(len(xs)), xs, label="Newton's method")
    plt.xlabel("iterations")
    plt.legend()
    plt.title("$\\alpha = {}$".format(alpha))
    # Dashed reference line at the true solution
    sqrt3 = math.pow(3, 0.5)
    n = len(xs)
    plt.plot(range(n), [sqrt3]*n, label="$\\sqrt{3}$", linestyle="--")
    plt.show()
compare_gradient_newton(f, 2., alpha=0.01, iterations=50)
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
But for larger $\alpha$, Newton's method is better behaved and gradient descent fails to converge.
|
compare_gradient_newton(f, 2., alpha=0.1, iterations=50)
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
In this case we can also apply Newton's method with just the first derivative to find a zero of $x^2 - 3$, i.e. we don't have to look for a minimum of
$(x^2 - 3)^2$ since Newton's method can also find zeros of functions.
|
def f(x):
    # The raw function x^2 - 3 itself: Newton finds its zero directly,
    # no squaring into a minimisation problem needed.
    return x**2 - 3
xs = newtons_method(f, 2., alpha=0.5, iterations=10)
# NOTE(review): `alpha` in the label is a leftover loop variable from an
# earlier cell, so the legend may not show the 0.5 actually used above.
plt.plot(range(len(xs)), xs, label="$\\alpha = {}$".format(alpha))
plt.xlabel("iterations")
# Dashed reference line at the true solution
sqrt3 = math.pow(3, 0.5)
n = len(xs)
plt.plot(range(n), [sqrt3]*n, label="$\\sqrt{3}$", linestyle="--")
print(xs[-1])
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Section 2
Now we'll look at multivariate derivatives and gradient descent,
again using Jax.
Multivariate derivatives
$$f(x, y) = x y^2$$
$$ \nabla f = [y^2, 2 x y]$$
|
def f(x, y):
    """f(x, y) = x * y^2, so the gradient is (y^2, 2xy)."""
    return x * (y * y)

# Evaluate everything at the same test point.
# Use float arguments, else Jax will complain.
point = (3., 1.)
print("f(3, 1)=", f(*point))
# argnums selects which positional argument to differentiate with respect to.
print("Partial x derivative at (3, 1):", grad(f, argnums=0)(*point))
print("Partial y derivative at (3, 1):", grad(f, argnums=1)(*point))
# A tuple of argnums yields the full gradient in one call.
full_grad = grad(f, (0, 1))
print("Gradient vector at (3, 1):", full_grad(*point))
g = [float(component) for component in full_grad(*point)]
print("Gradient vector at (3, 1):", g)
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
We can plot some of the vectors of a gradient. Let's consider
$$f(x, y) = x^2 + y^2$$
The partial derivatives are
$$\frac{\partial f}{\partial x} = 2x$$
$$\frac{\partial f}{\partial y} = 2y$$
So the gradient is $$\nabla f = [2x, 2y]^T$$
|
def f(x, y):
    """Paraboloid f(x, y) = x^2 + y^2, gradient (2x, 2y)."""
    return x * x + y * y

# Partial derivatives obtained by automatic differentiation.
partial_x = grad(f, argnums=0)
partial_y = grad(f, argnums=1)

# Same sample grid along both axes.
grid = np.arange(-1, 1.25, 0.25)
plt.clf()
# Draw the gradient vector as an arrow at every grid point.
for px in grid:
    for py in grid:
        du = partial_x(px, py)
        dv = partial_y(px, py)
        plt.arrow(px, py, du, dv, length_includes_head=True,
                  head_width=0.1)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Jacobians and Hessians with Jax
Let's verify some of the example from the slides.
Let $f(x) = \sum_i{x_i} = x_1 + \cdots + x_n$. Then the gradient is $\nabla f (x) = [1, \ldots, 1]^T.$
|
# Dimension of the test vectors.
n = 4

def f(x):
    """Sum of the components of x: f(x) = x_1 + ... + x_n."""
    return sum(x)

# The gradient of a sum is a vector of ones, independent of x.
test_point = [1., 2., 3., 4.]
print("x = ", test_point)
print("Gradient(x):", [float(g) for g in grad(f)(test_point)])

## Try other test points, even random ones:
test_point = np.random.rand(n)  # use n instead of a hard-coded 4
print()
print("x = ", test_point)
print("Gradient(x):", [float(g) for g in grad(f)(test_point)])
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Exercise
Compute the gradient of the function that sums the squares of the elements of a vector.
|
# @title Solution (double click to show)
def sum_squares(x):
    """Sum of the squared entries of x, i.e. the dot product x . x."""
    return jnp.dot(x, x)

point = np.array([1., 2., 3., 4.])
print("x = ", point)
print("Gradient(x):", [float(g) for g in grad(sum_squares)(point)])
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Let's try a Jacobian now. In the slides we saw that Jacobian of $f(\mathbf{x}) = Ax$ is $A$. Let's verify with Jax.
|
A = np.array([[1., 2.], [3., 4.]])

def f(x):
    """Linear map f(x) = A x; its Jacobian is the constant matrix A."""
    return A @ x

x = np.array([1., 1.])
jacfwd(f)(x)
# Try some other matrices A and a 3x3 matrix
# Note that Jax handles larger matrices with the same code
# But you'll need a length 3 vector for x
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
We can compute the Hessian by taking the Jacobian twice. In this case, $f(\mathbf{x}) = Ax$ is a linear function, so the second derivatives should all be zero.
|
A = np.array([[1., 2.], [3., 4.]])

def f(x):
    """Linear map f(x) = A x; every second derivative is zero."""
    return A @ x

def hessian(func):
    """Hessian operator via forward-over-reverse differentiation."""
    return jacfwd(jacrev(func))

x = np.array([1., 1.])
hessian(f)(x)
# Try some other matrices A and a 3x3 matrix
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Exercise
Now try to take derivatives of the function $f(\mathbf{x}) = x \cdot A x$. Hints:
* Is it scalar or vector-valued?
* Does it matter if $A$ is symmetric $(A = A^T)$ or anti-symmetric $(A = -A^T)$?
|
# Your code here
def f(x):
    """Compute x . A x"""
    pass

# Compute the first and second derivatives at a test point x

#@title Solution (double-click to show)
A = np.array([[1., 0.], [0., 3.]])

def f(x):
    """Quadratic form x . A x; gradient (A + A^T) x, Hessian A + A^T."""
    return jnp.dot(x, A @ x)

x = np.array([2., -1.])
print(grad(f)(x))
print(hessian(f)(x))
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Exercise: Entropy
Compute the Gradient and Hessian of the Shannon entropy:
$$S = -\sum_{i}{x_i \log x_i}$$
Note that for a test point you'll need to have all the elements positive and summing to 1, so a good choice is $$[1 / n, \ldots, 1 / n]$$
|
## Your code here
def entropy(x):
    pass

#@title Solution (double-click to show)
def entropy(x):
    """Shannon entropy S(x) = -sum_i x_i log x_i (natural log)."""
    return -sum(p * jnp.log(p) for p in x)

# A valid probability vector: positive entries summing to 1.
x = np.array([1./2, 1./2])
print(entropy(x))
print(grad(entropy)(x))
print(hessian(entropy)(x))
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Example: Linear Regression with Jax
Given some data of the form $(x_i, y_i)$, let's find a best fit line $$ y = m x + b $$ by minimizing the sum of squared errors.
$$ S = \sum_{i}{\left(y_i - (m x_i + b) \right)^2}$$
|
## Adapted from JAX docs: https://coax.readthedocs.io/en/latest/examples/linear_regression/jax.html
# Generate a noisy 1-feature regression dataset with sklearn and hold out
# a test split for evaluating the loss later.
X, y = make_regression(n_features=1, noise=10)
X, X_test, y, y_test = train_test_split(X, y)

# Scatter-plot the training data (first and only feature on the x-axis).
plt.scatter(X[:, 0], y)
plt.title("Randomly generated dataset")
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Read through the following code, which minimizes the sum of squared errors for a linear model.
|
# In JAX, we can specify our parameters as various kinds of Python objects,
# including dictionaries.
# Initial model parameters: one weight per input feature and a scalar bias,
# all starting at zero.
params = {
    'w': jnp.zeros(X.shape[1:]),
    'b': 0.
}
# The model function itself, a linear function.
def forward(params, X):
    """Predict y = X w + b from the parameter dict."""
    return jnp.dot(X, params['w']) + params['b']

# The loss we minimize: mean of squared errors between the model's
# predictions and the true targets.
def sse(params, X, y):
    """Sum of squared errors (mean)"""
    residual = forward(params, X) - y
    return jnp.mean(residual ** 2)
# Function to update our parameters in each step of gradient descent
def update(params, grads, alpha=0.1):
    """Take one gradient-descent step, p <- p - alpha * g, on every leaf.

    Uses `jax.tree_util.tree_map`, which maps over multiple pytrees
    (here the 'w'/'b' dicts); the original `jax.tree_multimap` was
    deprecated and later removed from JAX in its favor.
    """
    return jax.tree_util.tree_map(lambda p, g: p - alpha * g, params, grads)
# We'll define a gradient descent function similarly to as before,
# and we'll track the loss function values for plotting.
# Note that gradients are computed on the training data X and y,
# but the reported loss is evaluated on the test data X_test and y_test.
def gradient_descent(f, params, X, X_test, y, y_test, alpha=0.1, iterations=30):
    """Run `iterations` steps of gradient descent on the loss f.

    Returns (history, losses): the parameter dict after each step and
    the corresponding test-set loss values.
    """
    grad_fn = grad(f)
    history, losses = [], []
    for _ in range(iterations):
        params = update(params, grad_fn(params, X, y), alpha)
        history.append(params)
        losses.append(f(params, X_test, y_test))
    return history, losses
# Function to plot our residuals to see how the loss function progresses
def plot_residuals(params, model_fn, X, y, color='blue'):
    """Histogram the prediction errors y - model_fn(params, X)."""
    plt.hist(y - model_fn(params, X), bins=10, color=color, alpha=0.5)
# Find the best fit line
fit_params, losses = gradient_descent(sse, params, X, X_test, y, y_test)

# Plot the decrease in the loss function over iterations.
plt.plot(range(len(losses)), losses)
plt.ylabel("SSE")
plt.xlabel("Iteration")
plt.title("Loss evolution\nMinimizing sum of squared errors")
plt.show()

# Compare the errors of the initial-guess model against the fitted model.
# Note the differences in scales between the two histograms.
plot_residuals(params, forward, X, y)
plt.title("Histogram of initial residuals (errors for each point)")
plt.show()
plot_residuals(fit_params[-1], forward, X, y, color='green')
plt.title("Histogram of final residuals (errors for each point)")
plt.show()

# Scatter the data and overlay the fitted line y = m x + b.
xs = [row[0] for row in X]
plt.scatter(xs, y)
plt.title("Best fit line")
params = fit_params[-1]
xs = np.arange(min(xs), max(xs), 0.1)
m = float(params['w'])
b = float(params['b'])
plt.plot(xs, [m * x + b for x in xs], color='black')
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
We can easily use another loss function, like the mean absolute error, where we use the absolute value of the residuals instead of the square, which reduces the
impact of outliers.
$$ S = \sum_{i}{\left|y_i - (m x_i + b) \right|}$$
This will give us a different best fit line for some data sets.
|
## Minimize MAE instead of SSE
# MAE is the p=1 case of this function.
def lp_norm(p=2):
    """Return a loss function computing the L_p norm of the residuals."""
    def norm(params, X, y):
        return jnp.linalg.norm(forward(params, X) - y, ord=p)
    return norm
# Generate some noisier data
X, y = make_regression(n_features=1, noise=100, bias=5)
X, X_test, y, y_test = train_test_split(X, y)

# Fit by minimizing the mean absolute error (the p = 1 norm).
fit_params, losses = gradient_descent(lp_norm(p=1.), params, X, X_test, y, y_test)

# Scatter the data
xs = [row[0] for row in X]
plt.scatter(xs, y)
plt.title("Best fit line")

# Overlay the MAE best fit line
params = fit_params[-1]
xs = np.arange(min(xs), max(xs), 0.1)
m = float(params['w'])
b = float(params['b'])
plt.plot(xs, [m * x + b for x in xs], color='black', label="MAE")

# Compare to the SSE best fit line
fit_params, losses = gradient_descent(sse, params, X, X_test, y, y_test)
params = fit_params[-1]
xs = np.arange(min(xs), max(xs), 0.1)
m = float(params['w'])
b = float(params['b'])
plt.plot(xs, [m * x + b for x in xs], color='green', label="SSE")
plt.legend()
plt.show()
|
colabs/Multivariate Calculus for ML, 1 of 2.ipynb
|
google/physics-math-tutorials
|
apache-2.0
|
Vi bruker her oversikten over lusetellinger fra fiskehelse.no / Mattilsynet igjen
|
# Load the lice-count-per-fish overview (export from fiskehelse.no /
# the Norwegian Food Safety Authority, dated 2017-10-27).
df = pd.read_excel('data/fiskehelse_2017-10-27-lakselus_per_fisk.xlsx')
|
notebooks/02 Vasking av data.ipynb
|
BergensTidende/dataskup-2017-notebooks
|
mit
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.