repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/dask_app.py | import pandas as pd
import cudf
from dash import dcc, html
import numpy as np
import pandas as pd
from dash.dependencies import Input, Output, State
from dash import dcc
import dash_bootstrap_components as dbc
import time
import dash_daq as daq
import dash
from dask import delayed
from distributed import Client
from dask_cuda import LocalCUDACluster
from utils.utils import *
import argparse
# ### Dashboards start here
text_color = "#cfd8dc"  # Material blue-grey 100

# Module-level state shared by the Dash callbacks below.
# Projection bounds start out empty and are computed lazily on first use.
data_center_3857 = []
data_3857 = []
data_4326 = []
data_center_4326 = []
# Backups of the previous callback inputs, used to detect "tool change only"
# relayout events so the expensive figure rebuild can be skipped.
selected_map_backup = None
selected_race_backup = None
selected_county_top_backup = None
selected_county_bt_backup = None
view_name_backup = None
gpu_enabled_backup = None
dragmode_backup = "pan"
# Dash application instance; Bootstrap stylesheet is used for the tooltip/layout.
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])

# Full page layout: title bar, options bar (CPU/GPU toggle, view dropdown,
# clear-all button), the datashader map, three histograms, and credits.
app.layout = html.Div(
    children=[
        ################# Title Bar ##############
        html.Div(
            [
                html.H1(
                    children=[
                        "Census 2020 Net Migration Visualization",
                        html.A(
                            html.Img(
                                src="assets/rapids-logo.png",
                                style={
                                    "float": "right",
                                    "height": "45px",
                                    "margin-right": "1%",
                                    "margin-top": "-7px",
                                },
                            ),
                            href="https://rapids.ai/",
                        ),
                        html.A(
                            html.Img(
                                src="assets/dash-logo.png",
                                style={"float": "right", "height": "30px"},
                            ),
                            href="https://dash.plot.ly/",
                        ),
                    ],
                    style={
                        "text-align": "left",
                        "heights": "30px",
                        "margin-left": "20px",
                    },
                ),
            ]
        ),
        ###################### Options Bar ######################
        html.Div(
            children=[
                html.Div(
                    children=[
                        html.Table(
                            [
                                html.Tr(
                                    [
                                        html.Td(
                                            html.Div("CPU"),
                                            style={
                                                "font-size": "20px",
                                                "padding-left": "1.3rem",
                                            },  # className="config-label"
                                        ),
                                        html.Td(
                                            html.Div(
                                                [
                                                    # Toggle between pandas (CPU) and
                                                    # dask_cudf (GPU) datasets.
                                                    daq.DarkThemeProvider(
                                                        daq.BooleanSwitch(
                                                            on=True,  # Turn on CPU/GPU
                                                            color="#00cc96",
                                                            id="gpu-toggle",
                                                        )
                                                    ),
                                                    dbc.Tooltip(
                                                        "Caution: Using CPU compute for more than 50 million points is not recommended.",
                                                        target="gpu-toggle",
                                                        placement="bottom",
                                                        autohide=True,
                                                        style={
                                                            "textAlign": "left",
                                                            "font-size": "15px",
                                                            "color": "white",
                                                            "width": "350px",
                                                            "padding": "15px",
                                                            "border-radius": "5px",
                                                            "background-color": "#2a2a2e",
                                                        },
                                                    ),
                                                ]
                                            )
                                        ),
                                        html.Td(
                                            html.Div("GPU + RAPIDS"),
                                            style={
                                                "font-size": "20px"
                                            },  # , className="config-label"
                                        ),
                                        ####### Indicator graph ######
                                        html.Td(
                                            [
                                                dcc.Loading(
                                                    dcc.Graph(
                                                        id="indicator-graph",
                                                        figure=blank_fig(50),
                                                        config={
                                                            "displayModeBar": False
                                                        },
                                                        style={"width": "95%"},
                                                    ),
                                                    color="#b0bec5",
                                                    # style={'height': f'{50}px', 'width':'10px'}
                                                ),  # style={'width': '50%'},
                                            ]
                                        ),
                                        ###### VIEWS ARE HERE ###########
                                        html.Td(
                                            html.Div("Data-Selection"),
                                            style={"font-size": "20px"},
                                        ),  # className="config-label"
                                        html.Td(
                                            # The "value" strings below are the
                                            # view_name keys consumed by the
                                            # update_plots callback.
                                            dcc.Dropdown(
                                                id="view-dropdown",
                                                options=[
                                                    {
                                                        "label": "Total Population",
                                                        "value": "total",
                                                    },
                                                    {
                                                        "label": "Migrating In",
                                                        "value": "in",
                                                    },
                                                    {
                                                        "label": "Stationary",
                                                        "value": "stationary",
                                                    },
                                                    {
                                                        "label": "Migrating Out",
                                                        "value": "out",
                                                    },
                                                    {
                                                        "label": "Net Migration",
                                                        "value": "net",
                                                    },
                                                    {
                                                        "label": "Population with Race",
                                                        "value": "race",
                                                    },
                                                ],
                                                value="in",
                                                searchable=False,
                                                clearable=False,
                                            ),
                                            style={
                                                "width": "10%",
                                                "height": "15px",
                                            },
                                        ),
                                        html.Td(
                                            html.Div(
                                                children=[
                                                    html.Button(
                                                        "Clear All Selections",
                                                        id="clear-all",
                                                        className="reset-button",
                                                    ),
                                                ]
                                            ),
                                            style={
                                                "width": "10%",
                                                "height": "15px",
                                            },
                                        ),
                                    ]
                                ),
                            ],
                            style={"width": "100%", "margin-top": "0px"},
                        ),
                        # Hidden div inside the app that stores the intermediate value
                        html.Div(
                            id="datapoints-state-value",
                            style={"display": "none"},
                        ),
                    ],
                    className="columns pretty_container",
                ),  # className='columns pretty_container', id="config-div"),
            ]
        ),
        ########## End of options bar #######################################
        html.Hr(id="line1", style={"border": "1px solid grey", "margin": "0px"}),
        # html.Div( html.Hr(id='line',style={'border': '1px solid red'}) ),
        ##################### Map starts ###################################
        html.Div(
            children=[
                html.Button(
                    "Clear Selection", id="reset-map", className="reset-button"
                ),
                html.H4(
                    [
                        "Individual Distribution",
                    ],
                    className="container_title",
                ),
                dcc.Graph(
                    id="map-graph",
                    config={"displayModeBar": False},
                    figure=blank_fig(440),
                ),
                # Hidden div inside the app that stores the intermediate value
                html.Div(id="intermediate-state-value", style={"display": "none"}),
            ],
            className="columns pretty_container",
            style={"width": "100%", "margin-right": "0", "height": "66%"},
            id="map-div",
        ),
        html.Hr(id="line2", style={"border": "1px solid grey", "margin": "0px"}),
        ################# Bars start #########################
        # Race start
        html.Div(
            children=[
                html.Button(
                    "Clear Selection",
                    id="clear-race",
                    className="reset-button",
                ),
                html.H4(
                    [
                        "Race Distribution",
                    ],
                    className="container_title",
                ),
                dcc.Graph(
                    id="race-histogram",
                    config={"displayModeBar": False},
                    figure=blank_fig(row_heights[2]),
                    animate=False,
                ),
            ],
            className="columns pretty_container",
            id="race-div",
            style={"width": "33.33%", "height": "20%"},
        ),
        # County top starts
        html.Div(
            children=[
                html.Button(
                    "Clear Selection",
                    id="clear-county-top",
                    className="reset-button",
                ),
                html.H4(
                    [
                        "County-wise Top 15",
                    ],
                    className="container_title",
                ),
                dcc.Graph(
                    id="county-histogram-top",
                    config={"displayModeBar": False},
                    figure=blank_fig(row_heights[2]),
                    animate=False,
                ),
            ],
            className="columns pretty_container",
            id="county-div-top",
            style={"width": "33.33%", "height": "20%"},
        ),
        # County bottom starts
        html.Div(
            children=[
                html.Button(
                    "Clear Selection",
                    id="clear-county-bottom",
                    className="reset-button",
                ),
                html.H4(
                    [
                        "County-wise Bottom 15",
                    ],
                    className="container_title",
                ),
                dcc.Graph(
                    id="county-histogram-bottom",
                    config={"displayModeBar": False},
                    figure=blank_fig(row_heights[2]),
                    animate=False,
                ),
            ],
            className="columns pretty_container",
            id="county-div-bottom",
            style={"width": "33.33%", "height": "20%"},
        ),
        ############## End of Bars #####################
        html.Hr(id="line3", style={"border": "1px solid grey", "margin": "0px"}),
        html.Div(
            [
                html.H4(
                    "Acknowledgements and Data Sources",
                    style={"margin-top": "0"},
                ),
                dcc.Markdown(
                    """\
- 2020 Population Census and 2010 Population Census to compute Migration Dataset, used with permission from IPUMS NHGIS, University of Minnesota, [www.nhgis.org](https://www.nhgis.org/) ( not for redistribution ).
- Base map layer provided by [Mapbox](https://www.mapbox.com/).
- Dashboard developed with [Plotly Dash](https://plotly.com/dash/).
- Geospatial point rendering developed with [Datashader](https://datashader.org/).
- GPU toggle accelerated with [RAPIDS cudf and dask_cudf](https://rapids.ai/) and [cupy](https://cupy.chainer.org/), CPU toggle with [pandas](https://pandas.pydata.org/).
- For source code and data workflow, visit our [GitHub](https://github.com/rapidsai/plotly-dash-rapids-census-demo/tree/master).
"""
                ),
            ],
            style={"width": "100%"},
            className="columns pretty_container",
        ),
    ]
)
# Clear/reset button callbacks
@app.callback(
    Output("map-graph", "selectedData"),
    [Input("reset-map", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_map(*_clicks):
    """Drop the map's box/lasso selection when either clear button fires."""
    return None
@app.callback(
    Output("race-histogram", "selectedData"),
    [Input("clear-race", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_race_hist_selections(*_clicks):
    """Drop the race histogram selection when either clear button fires."""
    return None
@app.callback(
    Output("county-histogram-top", "selectedData"),
    [Input("clear-county-top", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_top_selections(*_clicks):
    """Drop the top-15 county histogram selection on either clear button."""
    return None
@app.callback(
    Output("county-histogram-bottom", "selectedData"),
    [Input("clear-county-bottom", "n_clicks"), Input("clear-all", "n_clicks")],
)
def clear_county_hist_bottom_selections(*_clicks):
    """Drop the bottom-15 county histogram selection on either clear button."""
    return None
# # Query string helpers
def register_update_plots_callback(client):
    """
    Register Dash callback that updates all plots in response to selection events

    Args:
        client: Dask distributed Client connected to the cluster that holds the
            published datasets "c_df_d" (dask_cudf) and "pd_df_d" (dask/pandas)
    """

    @app.callback(
        [
            Output("indicator-graph", "figure"),
            Output("map-graph", "figure"),
            Output("map-graph", "config"),
            Output("map-graph", "relayoutData"),
            Output("county-histogram-top", "figure"),
            Output("county-histogram-top", "config"),
            Output("county-histogram-bottom", "figure"),
            Output("county-histogram-bottom", "config"),
            Output("race-histogram", "figure"),
            Output("race-histogram", "config"),
            Output("intermediate-state-value", "children"),
        ],
        [
            Input("map-graph", "relayoutData"),
            Input("map-graph", "selectedData"),
            Input("race-histogram", "selectedData"),
            Input("county-histogram-top", "selectedData"),
            Input("county-histogram-bottom", "selectedData"),
            Input("view-dropdown", "value"),
            Input("gpu-toggle", "on"),
        ],
        [
            State("intermediate-state-value", "children"),
            # The remaining States mirror the Outputs one-for-one so the
            # previous figures can be returned unchanged (via *backup_args)
            # when only the drag tool changed.
            State("indicator-graph", "figure"),
            State("map-graph", "figure"),
            State("map-graph", "config"),
            State("map-graph", "relayoutData"),
            State("county-histogram-top", "figure"),
            State("county-histogram-top", "config"),
            State("county-histogram-bottom", "figure"),
            State("county-histogram-bottom", "config"),
            State("race-histogram", "figure"),
            State("race-histogram", "config"),
            State("intermediate-state-value", "children"),
        ],
    )
    def update_plots(
        relayout_data,
        selected_map,
        selected_race,
        selected_county_top,
        selected_county_bottom,
        view_name,
        gpu_enabled,
        coordinates_backup,
        *backup_args,  # the mirrored States, in Output order
    ):
        global data_3857, data_center_3857, data_4326, data_center_4326, selected_map_backup, selected_race_backup, selected_county_top_backup, selected_county_bt_backup, view_name_backup, gpu_enabled_backup, dragmode_backup
        # condition to avoid reloading on tool update: if only the dragmode
        # changed and every other input matches its backup, patch the stored
        # map figure's dragmode and return the previous outputs as-is.
        if (
            type(relayout_data) == dict
            and list(relayout_data.keys()) == ["dragmode"]
            and selected_map == selected_map_backup
            and selected_race_backup == selected_race
            and selected_county_top_backup == selected_county_top
            and selected_county_bt_backup == selected_county_bottom
            and view_name_backup == view_name
            and gpu_enabled_backup == gpu_enabled
        ):
            # backup_args[1] is the stored map-graph figure (see State list)
            backup_args[1]["layout"]["dragmode"] = relayout_data["dragmode"]
            dragmode_backup = relayout_data["dragmode"]
            return backup_args
        # Remember current inputs for the next tool-only-change check.
        selected_map_backup = selected_map
        selected_race_backup = selected_race
        selected_county_top_backup = selected_county_top
        selected_county_bt_backup = selected_county_bottom
        view_name_backup = view_name
        gpu_enabled_backup = gpu_enabled
        t0 = time.time()
        if coordinates_backup is not None:
            coordinates_4326_backup, position_backup = coordinates_backup
        else:
            coordinates_4326_backup, position_backup = None, None
        # Get delayed dataset from client (GPU or CPU copy per the toggle)
        if gpu_enabled:
            df = client.get_dataset("c_df_d")
        else:
            df = client.get_dataset("pd_df_d")
        colorscale_name = "Viridis"
        # Lazily compute the dataset's projection bounds once per process.
        if data_3857 == []:
            projections = delayed(set_projection_bounds)(df)
            (
                data_3857,
                data_center_3857,
                data_4326,
                data_center_4326,
            ) = projections.compute()
        figures = build_updated_figures_dask(
            df,
            relayout_data,
            selected_map,
            selected_race,
            selected_county_top,
            selected_county_bottom,
            colorscale_name,
            data_3857,
            data_center_3857,
            data_4326,
            data_center_4326,
            coordinates_4326_backup,
            position_backup,
            view_name,
        )
        (
            datashader_plot,
            race_histogram,
            county_top_histogram,
            county_bottom_histogram,
            n_selected_indicator,
            coordinates_4326_backup,
            position_backup,
        ) = figures
        # Shared config for all three bar charts: keep the mode bar but strip
        # the tools that would conflict with bar selection.
        barchart_config = {
            "displayModeBar": True,
            "modeBarButtonsToRemove": [
                "zoom2d",
                "pan2d",
                "select2d",
                "lasso2d",
                "zoomIn2d",
                "zoomOut2d",
                "resetScale2d",
                "hoverClosestCartesian",
                "hoverCompareCartesian",
                "toggleSpikelines",
            ],
        }
        compute_time = time.time() - t0
        print(f"Query time: {compute_time}")
        # Append a "Query Time" indicator trace next to the data-size one.
        n_selected_indicator["data"].append(
            {
                "title": {"text": "Query Time"},
                "type": "indicator",
                "value": round(compute_time, 4),
                "domain": {"x": [0.53, 0.61], "y": [0, 0.5]},
                "number": {
                    "font": {
                        "color": text_color,
                        "size": "50px",
                    },
                    "suffix": " seconds",
                },
            }
        )
        # Preserve the user's current drag tool across figure rebuilds.
        datashader_plot["layout"]["dragmode"] = (
            relayout_data["dragmode"]
            if (relayout_data and "dragmode" in relayout_data)
            else dragmode_backup
        )
        return (
            n_selected_indicator,
            datashader_plot,
            {
                "displayModeBar": True,
                "modeBarButtonsToRemove": [
                    "lasso2d",
                    "zoomInMapbox",
                    "zoomOutMapbox",
                    "toggleHover",
                ],
            },
            relayout_data,
            race_histogram,
            barchart_config,
            county_top_histogram,
            barchart_config,
            county_bottom_histogram,
            barchart_config,
            (coordinates_4326_backup, position_backup),
        )
def publish_dataset_to_cluster(cuda_visible_devices):
    """
    Start a LocalCUDACluster, load the census parquet dataset, publish both a
    GPU (dask_cudf) and a CPU (dask/pandas) copy to the cluster, and register
    the Dash callback that queries them.

    Args:
        cuda_visible_devices: comma separated GPU id string (e.g. "0,1"),
            or None/empty to use all available GPUs
    """
    census_data_url = "https://data.rapids.ai/viz-data/total_population_dataset.parquet"
    data_path = "../data/total_population_dataset.parquet"
    # Download the parquet file if it is not already cached locally.
    check_dataset(census_data_url, data_path)
    # Note: The creation of a Dask LocalCluster must happen inside the `__main__` block,
    cluster = (
        LocalCUDACluster(CUDA_VISIBLE_DEVICES=cuda_visible_devices)
        if cuda_visible_devices
        else LocalCUDACluster()
    )
    client = Client(cluster)
    print(f"Dask status: {cluster.dashboard_link}")

    # Load dataset and persist dataset on cluster
    def load_and_publish_dataset():
        # dask_cudf DataFrame
        c_df_d = load_dataset(data_path, "dask_cudf").persist()
        # pandas DataFrame
        pd_df_d = load_dataset(data_path, "dask").persist()
        # Unpublish datasets if present (allows re-running in the same cluster)
        for ds_name in ["pd_df_d", "c_df_d"]:
            if ds_name in client.datasets:
                client.unpublish_dataset(ds_name)
        # Publish datasets to the cluster
        client.publish_dataset(pd_df_d=pd_df_d)
        client.publish_dataset(c_df_d=c_df_d)

    load_and_publish_dataset()
    # Precompute field bounds
    # NOTE(review): this handle is fetched but not used in this function —
    # bounds are actually computed lazily inside the callback; verify intent.
    c_df_d = client.get_dataset("c_df_d")
    # Register top-level callback that updates plots
    register_update_plots_callback(client)
if __name__ == "__main__":
    # Parse the single CLI option controlling which GPUs the cluster uses.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cuda_visible_devices",
        help="supply the value of CUDA_VISIBLE_DEVICES as a comma separated string (e.g: --cuda_visible_devices=0,1), if None, all the available GPUs are used",
        default=None,
    )
    args = parser.parse_args()
    # development entry point
    publish_dataset_to_cluster(args.cuda_visible_devices)
    # Launch dashboard
    app.run_server(debug=False, dev_tools_silence_routes_logging=True, host="0.0.0.0")
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/utils/__init__.py | from .utils import *
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/utils/utils.py | from bokeh import palettes
from pyproj import Transformer
import cudf
import cupy as cp
import dask.dataframe as dd
import datashader as ds
import datashader.transfer_functions as tf
import io
import numpy as np
import os
import pandas as pd
import pickle
import requests
try:
import dask_cudf
except ImportError:
dask_cudf = None
# Colors
# Colors
bgcolor = "#000000"  # mapbox dark map land color
text_color = "#cfd8dc"  # Material blue-grey 100
mapbox_land_color = "#000000"
# NOTE(review): `c` is not referenced anywhere in this module's visible code —
# purpose unclear; confirm before removing.
c = 9200
# Colors for categories
# Integer race codes (see id2race below) -> CSS color names.
colors = {}
colors["race"] = {
    1: "aqua",
    2: "lime",
    3: "yellow",
    4: "orange",
    5: "blue",
    6: "fuchsia",
    7: "saddlebrown",
}
# Race display label -> CSS color (same palette as colors["race"]).
race2color = {
    "White": "aqua",
    "African American": "lime",
    "American Indian": "yellow",
    "Asian alone": "orange",
    "Native Hawaiian": "blue",
    "Other Race alone": "fuchsia",
    "Two or More": "saddlebrown",
}
# Net-migration code (-1 out / 0 stationary / 1 in) -> color.
colors["net"] = {
    -1: palettes.RdPu9[2],
    0: palettes.Greens9[4],
    1: palettes.PuBu9[2],
}  # '#32CD32'
# Figure template
row_heights = [150, 440, 300, 75]  # pixel heights used by the dashboard rows
template = {
    "layout": {
        "paper_bgcolor": bgcolor,
        "plot_bgcolor": bgcolor,
        "font": {"color": text_color},
        "margin": {"r": 0, "t": 0, "l": 0, "b": 0},
        "bargap": 0.05,
        "xaxis": {"showgrid": False, "automargin": True},
        "yaxis": {"showgrid": True, "automargin": True},
        # 'gridwidth': 0.5, 'gridcolor': mapbox_land_color},
    }
}
url = "https://raw.githubusercontent.com/rapidsai/plotly-dash-rapids-census-demo/main/id2county.pkl"
# NOTE(review): unpickling content fetched over the network at import time is a
# security risk (pickle can execute arbitrary code if the host is compromised)
# and adds a hard network dependency on module import — consider shipping this
# mapping as JSON instead.
id2county = pickle.load(io.BytesIO(requests.get(url).content))
county2id = {v: k for k, v in id2county.items()}
# Integer race code -> display label (0 means "no race filter").
id2race = {
    0: "All",
    1: "White",
    2: "African American",
    3: "American Indian",
    4: "Asian alone",
    5: "Native Hawaiian",
    6: "Other Race alone",
    7: "Two or More",
}
race2id = {v: k for k, v in id2race.items()}
# Optional per-column tick/hover label mappings (empty unless populated elsewhere).
mappings = {}
mappings_hover = {}
mapbox_style = "carto-darkmatter"
def set_projection_bounds(df_d):
    """
    Compute the dataset's bounding box and center in both web-mercator
    (EPSG:3857) and lon/lat (EPSG:4326) coordinates.

    Args:
        df_d: DataFrame-like object with numeric `easting` and `northing`
            columns in EPSG:3857.

    Returns:
        Tuple (data_3857, data_center_3857, data_4326, data_center_4326) where
        data_3857 is ([min_e, min_n], [max_e, max_n]) and the 4326 values are
        the same points converted to [lon, lat] pairs.
    """
    # Fix: the original also built an unused 4326->3857 transformer and helper
    # function here; only the 3857->4326 direction is needed.
    transformer_3857_to_4326 = Transformer.from_crs("epsg:3857", "epsg:4326")

    def epsg_3857_to_4326(coords):
        # Transformer yields (lat, lon); reverse each pair to [lon, lat].
        return [
            list(reversed(transformer_3857_to_4326.transform(*row))) for row in coords
        ]

    data_3857 = (
        [df_d.easting.min(), df_d.northing.min()],
        [df_d.easting.max(), df_d.northing.max()],
    )
    data_center_3857 = [
        [
            (data_3857[0][0] + data_3857[1][0]) / 2.0,
            (data_3857[0][1] + data_3857[1][1]) / 2.0,
        ]
    ]
    data_4326 = epsg_3857_to_4326(data_3857)
    data_center_4326 = epsg_3857_to_4326(data_center_3857)
    return data_3857, data_center_3857, data_4326, data_center_4326
# Build Dash app and initial layout
def blank_fig(height):
    """
    Build an empty placeholder figure of the requested height.

    Args:
        height: height of the blank figure in pixels

    Returns:
        Figure dict with no traces and both axes hidden
    """
    layout = {
        "height": height,
        "template": template,
        "xaxis": {"visible": False},
        "yaxis": {"visible": False},
    }
    return {"data": [], "layout": layout}
# Plot functions
def build_colorscale(colorscale_name, transform):
    """
    Build a plotly colorscale from a bokeh palette.

    Args:
        colorscale_name: Name of a palette attribute on bokeh.palettes
        transform: Transform to apply to the scale positions. One of
            'linear', 'sqrt', 'cbrt', or 'log'

    Returns:
        List of (position, color) pairs usable as a plotly colorscale

    Raises:
        ValueError: if `transform` is not one of the accepted names
    """
    palette = getattr(palettes, colorscale_name)
    positions = np.linspace(0, 1, len(palette))
    if transform == "linear":
        pass  # evenly spaced positions
    elif transform == "sqrt":
        positions = positions ** 2
    elif transform == "cbrt":
        positions = positions ** 3
    elif transform == "log":
        positions = (10 ** positions - 1) / 9
    else:
        raise ValueError("Unexpected colorscale transform")
    return list(zip(positions, palette))
def get_min_max(df, col):
    """Return (min, max) of column *col*, computing eagerly for dask_cudf frames."""
    series = df[col]
    if dask_cudf and isinstance(df, dask_cudf.core.DataFrame):
        return (series.min().compute(), series.max().compute())
    return (series.min(), series.max())
def build_datashader_plot(
    df,
    colorscale_name,
    colorscale_transform,
    new_coordinates,
    position,
    x_range,
    y_range,
    view_name,
):
    """
    Rasterize the point dataset with datashader and wrap the resulting image
    in a plotly mapbox figure.

    Args:
        df: DataFrame with `easting`/`northing` (EPSG:3857) and `race`/`net`
            columns
        colorscale_name: bokeh palette name (overridden per view below)
        colorscale_transform: scale transform name (forced to "linear" below)
        new_coordinates: corner lon/lat pairs for the mapbox image layer
        position: dict of mapbox camera settings merged into the layout
        x_range, y_range: (min, max) extents in EPSG:3857 for the canvas
        view_name: one of "race", "total", "in", "stationary", "out", "net"

    Returns:
        Plotly figure dict with the datashaded image layer and a zero-size
        scattermapbox trace that only carries the colorbar.
    """
    # global data_3857, data_center_3857, data_4326, data_center_4326
    x0, x1 = x_range
    y0, y1 = y_range
    datashader_color_scale = {}
    cvs = ds.Canvas(plot_width=3840, plot_height=2160, x_range=x_range, y_range=y_range)
    # The caller's transform is intentionally overridden.
    colorscale_transform = "linear"
    # Per-view aggregation: categorical views use a mean over coded values,
    # count views use a per-view palette.
    if view_name == "race":
        aggregate_column = "race"
        aggregate = "mean"
    elif view_name == "total":
        aggregate_column = "net"
        aggregate = "count"
        colorscale_name = "Viridis10"
    elif view_name == "in":
        aggregate_column = "net"
        aggregate = "count"
        colorscale_name = "PuBu9"
    elif view_name == "stationary":
        aggregate_column = "net"
        aggregate = "count"
        colorscale_name = "Greens9"
    elif view_name == "out":
        aggregate_column = "net"
        aggregate = "count"
        colorscale_name = "RdPu9"
    else:  # net
        aggregate_column = "net"
        aggregate = "mean"
    if aggregate == "mean":
        # Discrete color key keyed by the coded column values.
        datashader_color_scale["color_key"] = colors[aggregate_column]
        datashader_color_scale["how"] = "log"
    else:
        datashader_color_scale["cmap"] = [
            i[1] for i in build_colorscale(colorscale_name, colorscale_transform)
        ]
        datashader_color_scale["how"] = "log"
    agg = cvs.points(
        df,
        x="easting",
        y="northing",
        agg=getattr(ds, aggregate)(aggregate_column),
    )
    cmin = cp.asnumpy(agg.min().data)
    cmax = cp.asnumpy(agg.max().data)
    # Count the number of selected towers
    temp = agg.sum()
    temp.data = cp.asnumpy(temp.data)
    n_selected = int(temp)
    if n_selected == 0:
        # Nothing to display
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}
        layers = []
    else:
        img = tf.shade(
            tf.dynspread(agg, threshold=0.7),
            **datashader_color_scale,
        ).to_pil()
        # img = tf.shade(agg,how='log',**datashader_color_scale).to_pil()
        # Add image as mapbox image layer. Note that as of version 4.4, plotly will
        # automatically convert the PIL image object into a base64 encoded png string
        layers = [
            {
                "sourcetype": "image",
                "source": img,
                "coordinates": new_coordinates,
            }
        ]
        # Do not display any mapbox markers
        lat = [None]
        lon = [None]
        customdata = [None]
        marker = {}
    # Build map figure
    map_graph = {
        "data": [],
        "layout": {
            "template": template,
            "uirevision": True,
            "mapbox": {
                "style": mapbox_style,
                "layers": layers,
                "center": {
                    "lon": -78.81063494489342,
                    "lat": 37.471878534555074,
                },
                "zoom": 3,
            },
            "margin": {"r": 140, "t": 0, "l": 0, "b": 0},
            "height": 500,
            # Thin border drawn around the full plotting area.
            "shapes": [
                {
                    "type": "rect",
                    "xref": "paper",
                    "yref": "paper",
                    "x0": 0,
                    "y0": 0,
                    "x1": 1,
                    "y1": 1,
                    "line": {
                        "width": 1,
                        "color": "#191a1a",
                    },
                }
            ],
        },
    }
    if aggregate == "mean":
        # Categorical views: build a stepped colorscale so the colorbar shows
        # one solid band per category.
        if view_name == "race":
            colorscale = [0, 1]
            marker = dict(
                size=0,
                showscale=True,
                colorbar={
                    "title": {
                        "text": "Race",
                        "side": "right",
                        "font": {"size": 14},
                    },
                    # Tick at the middle of each of the 7 bands.
                    "tickvals": [
                        (0 + 0.5) / 7,
                        (1 + 0.5) / 7,
                        (2 + 0.5) / 7,
                        (3 + 0.5) / 7,
                        (4 + 0.5) / 7,
                        (5 + 0.5) / 7,
                        (6 + 0.5) / 7,
                    ],
                    "ticktext": [
                        "White",
                        "African American",
                        "American Indian",
                        "Asian alone",
                        "Native Hawaiian",
                        "Other Race alone",
                        "Two or More",
                    ],
                    "ypad": 30,
                },
                colorscale=[
                    (0 / 7, colors["race"][1]),
                    (1 / 7, colors["race"][1]),
                    (1 / 7, colors["race"][2]),
                    (2 / 7, colors["race"][2]),
                    (2 / 7, colors["race"][3]),
                    (3 / 7, colors["race"][3]),
                    (3 / 7, colors["race"][4]),
                    (4 / 7, colors["race"][4]),
                    (4 / 7, colors["race"][5]),
                    (5 / 7, colors["race"][5]),
                    (5 / 7, colors["race"][6]),
                    (6 / 7, colors["race"][6]),
                    (6 / 7, colors["race"][7]),
                    (7 / 7, colors["race"][7]),
                    (7 / 7, colors["race"][7]),
                ],
                cmin=0,
                cmax=1,
            )  # end of marker
        else:
            colorscale = [0, 1]
            marker = dict(
                size=0,
                showscale=True,
                colorbar={
                    "title": {
                        "text": "Migration",
                        "side": "right",
                        "font": {"size": 14},
                    },
                    "tickvals": [(0 + 0.5) / 3, (1 + 0.5) / 3, (2 + 0.5) / 3],
                    "ticktext": ["Out", "Stationary", "In"],
                    "ypad": 30,
                },
                colorscale=[
                    (0 / 3, colors["net"][-1]),
                    (1 / 3, colors["net"][-1]),
                    (1 / 3, colors["net"][0]),
                    (2 / 3, colors["net"][0]),
                    (2 / 3, colors["net"][1]),
                    (3 / 3, colors["net"][1]),
                ],
                cmin=0,
                cmax=1,
            )  # end of marker
        # Invisible trace that exists only to render the colorbar.
        map_graph["data"].append(
            {
                "type": "scattermapbox",
                "lat": lat,
                "lon": lon,
                "customdata": customdata,
                "marker": marker,
                "hoverinfo": "none",
            }
        )
        map_graph["layout"]["annotations"] = []
    else:
        # Count views: continuous colorbar spanning the aggregate's range.
        marker = dict(
            size=0,
            showscale=True,
            colorbar={
                "title": {
                    "text": "Population",
                    "side": "right",
                    "font": {"size": 14},
                },
                "ypad": 30,
            },
            colorscale=build_colorscale(colorscale_name, colorscale_transform),
            cmin=cmin,
            cmax=cmax,
        )  # end of marker
        map_graph["data"].append(
            {
                "type": "scattermapbox",
                "lat": lat,
                "lon": lon,
                "customdata": customdata,
                "marker": marker,
                "hoverinfo": "none",
            }
        )
    # Apply the caller-provided camera (zoom/center) settings.
    map_graph["layout"]["mapbox"].update(position)
    return map_graph
def query_df_range_lat_lon(df, x0, x1, y0, y1, x, y):
    """
    Filter *df* to rows whose *x* column lies in [x0, x1] and whose *y*
    column lies in [y1, y0] (note: y0 is the upper bound, y1 the lower).

    The index is reset to a RangeIndex only when rows were actually dropped;
    an all-inclusive box returns *df* unmodified.
    """
    inside = (df[x] >= x0) & (df[x] <= x1) & (df[y] <= y0) & (df[y] >= y1)
    if inside.sum() != len(df):
        df = df[inside]
        if isinstance(df, cudf.DataFrame):
            df.index = cudf.RangeIndex(0, len(df))
        else:
            df.index = pd.RangeIndex(0, len(df))
    del inside
    return df
def bar_selected_ids(selection, column):  # select ids for each column
    """Map the clicked bar labels of a histogram selection back to their ids
    (county ids for the two county histograms, race codes otherwise)."""
    lookup = county2id if column in ("county_top", "county_bottom") else race2id
    return [point["label"] and lookup[point["label"]] if False else lookup[point["label"]] for point in selection["points"]]
def query_df_selected_ids(df, col, selected_ids):
    """Filter *df* to rows whose *col* value is in *selected_ids*; the two
    county-histogram pseudo-columns both map to the real "county" column."""
    if col in ("county_top", "county_bottom"):
        col = "county"
    return df[df[col].isin(selected_ids)]
def no_data_figure():
    """Placeholder figure returned when a view has no race data to display."""
    placeholder_trace = {
        "title": {"text": "Query Result"},
        "text": "SOME RANDOM",
        "marker": {"text": "NO"},
    }
    return {
        "data": [placeholder_trace],
        "layout": {
            "height": 250,
            "template": template,
            "xaxis": {"visible": False},
            "yaxis": {"visible": False},
        },
    }
def build_histogram_default_bins(
    df,
    column,
    selections,
    orientation,
    colorscale_name,
    colorscale_transform,
    view_name,
    flag,
):
    """
    Build a bar-chart figure of row counts grouped by *column*.

    Args:
        df: pandas / cudf / dask / dask_cudf DataFrame with `column` and `net`
        column: "race", "county_top", or "county_bottom" (the latter two are
            both grouped on the real "county" column)
        selections: dict of currently-selected ids keyed by histogram column
        orientation: unused here (bars are always vertical)
        colorscale_name, colorscale_transform: passed to build_colorscale
        view_name: current view; picks the bar palette
        flag: "top" / "bottom" to keep the 15 largest / smallest county
            groups, anything else ("All") to keep every race group

    Returns:
        Plotly bar figure dict (or a placeholder figure for the race
        histogram in the "out" view, which has no race data).
    """
    if (view_name == "out") & (column == "race"):
        return no_data_figure()
    global race2color
    if (column == "county_top") | (column == "county_bottom"):
        column = "county"
    # Group and count, materializing to a pandas Series for all four backends.
    if dask_cudf and isinstance(df, dask_cudf.core.DataFrame):
        df = df[[column, "net"]].groupby(column)["net"].count().compute().to_pandas()
    elif isinstance(df, cudf.DataFrame):
        df = df[[column, "net"]].groupby(column)["net"].count().to_pandas()
    elif isinstance(df, dd.core.DataFrame):
        df = df[[column, "net"]].groupby(column)["net"].count().compute()
    else:
        df = df[[column, "net"]].groupby(column)["net"].count()
    df = df.sort_values(ascending=False)  # sorted grouped ids by counts
    if (flag == "top") | (flag == "bottom"):
        if flag == "top":
            view = df.iloc[:15]
        else:
            view = df.iloc[-15:]
        names = [id2county[cid] for cid in view.index.values]
    else:
        view = df
        names = [id2race[rid] for rid in view.index.values]
    bin_edges = names
    counts = view.values
    mapping_options = {}
    xaxis_labels = {}
    # Optional custom hover text / tick labels (mappings is empty by default).
    if column in mappings:
        if column in mappings_hover:
            mapping_options = {
                "text": list(mappings_hover[column].values()),
                "hovertemplate": "%{text}: %{y} <extra></extra>",
            }
        else:
            mapping_options = {
                "text": list(mappings[column].values()),
                "hovertemplate": "%{text} : %{y} <extra></extra>",
            }
        xaxis_labels = {
            "tickvals": list(mappings[column].keys()),
            "ticktext": list(mappings[column].values()),
        }
    # Bar coloring per view: count views color bars by their count value,
    # the race view uses the fixed race palette.
    if view_name == "total":
        bar_color = counts
        bar_scale = build_colorscale("Viridis10", colorscale_transform)
    elif view_name == "in":
        bar_color = counts
        bar_scale = build_colorscale("PuBu9", colorscale_transform)
    elif view_name == "stationary":
        bar_color = counts
        bar_scale = build_colorscale("Greens9", colorscale_transform)
    elif view_name == "out":
        bar_color = counts
        bar_scale = build_colorscale("RdPu9", colorscale_transform)
    elif view_name == "race":
        if column == "race":
            bar_color = [race2color[race] for race in names]
        else:
            bar_color = "#2C718E"
        bar_scale = None
    else:  # net
        bar_color = "#2C718E"
        bar_scale = None
    fig = {
        "data": [
            {
                "type": "bar",
                "x": bin_edges,
                "y": counts,
                "marker": {"color": bar_color, "colorscale": bar_scale},
                **mapping_options,
            }
        ],
        "layout": {
            "yaxis": {
                "type": "linear",
                "title": {"text": "Count"},
            },
            "xaxis": {**xaxis_labels},
            "selectdirection": "h",
            "dragmode": "select",
            "template": template,
            "uirevision": True,
            "hovermode": "closest",
        },
    }
    # NOTE(review): `column` was rewritten to "county" above while `selections`
    # is keyed by "county_top"/"county_bottom", so this test is always True for
    # the county histograms — confirm whether that is intended.
    if column not in selections:
        fig["data"][0]["selectedpoints"] = False
    return fig
def cull_empty_partitions(df):
    """Drop zero-length partitions from a dask DataFrame, using one of the
    empty partitions as the meta for the rebuilt frame."""
    partition_lengths = list(df.map_partitions(len).compute())
    delayed_parts = df.to_delayed()
    kept_parts = []
    empty_part = None
    for idx, n_rows in enumerate(partition_lengths):
        if n_rows == 0:
            empty_part = df.get_partition(idx)
        else:
            kept_parts.append(delayed_parts[idx])
    if empty_part is not None:
        df = dd.from_delayed(kept_parts, meta=empty_part)
    return df
def build_updated_figures_dask(
    df,
    relayout_data,
    selected_map,
    selected_race,
    selected_county_top,
    selected_county_bottom,
    colorscale_name,
    data_3857,
    data_center_3857,
    data_4326,
    data_center_4326,
    coordinates_4326_backup,
    position_backup,
    view_name,
):
    """
    Rebuild every dashboard figure from the current selections (dask variant).

    Args:
        df: dask or dask_cudf DataFrame with easting/northing/net/race/county
        relayout_data: map relayoutData from Dash (viewport / dragmode info)
        selected_map: map box-select payload, or None
        selected_race, selected_county_top, selected_county_bottom:
            histogram selection payloads, or None
        colorscale_name: default palette name for count views
        data_3857 / data_4326: dataset bounds in each projection
        data_center_3857 / data_center_4326: dataset center in each projection
        coordinates_4326_backup, position_backup: previous viewport, reused
            when only the drag tool changed
        view_name: current view key from the dropdown

    Returns:
        (datashader_plot, county_top_histogram, county_bottom_histogram,
         race_histogram, n_selected_indicator, coordinates_4326_backup,
         position_backup)
    """
    colorscale_transform = "linear"
    selected = {}
    # Map each histogram's selection payload to the ids it selects; entries
    # with no selected points are omitted entirely.
    selected = {
        col: bar_selected_ids(sel, col)
        for col, sel in zip(
            ["race", "county_top", "county_bottom"],
            [selected_race, selected_county_top, selected_county_bottom],
        )
        if sel and sel.get("points", [])
    }
    if relayout_data is not None:
        transformer_4326_to_3857 = Transformer.from_crs("epsg:4326", "epsg:3857")

        # NOTE(review): this helper is only defined when relayout_data is not
        # None, but it is also called in the selected_map branch below —
        # verify that selected_map can never be set with relayout_data None.
        def epsg_4326_to_3857(coords):
            return [transformer_4326_to_3857.transform(*reversed(row)) for row in coords]

    # Current viewport corners in lon/lat, if the map reported them.
    coordinates_4326 = relayout_data and relayout_data.get("mapbox._derived", {}).get(
        "coordinates", None
    )
    # Tool-only change: reuse the previous viewport instead of the derived one.
    dragmode = (
        relayout_data
        and "dragmode" in relayout_data
        and coordinates_4326_backup is not None
    )
    if dragmode:
        coordinates_4326 = coordinates_4326_backup
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        position = position_backup
    elif coordinates_4326:
        # Clamp the viewport to the dataset bounds.
        lons, lats = zip(*coordinates_4326)
        lon0, lon1 = max(min(lons), data_4326[0][0]), min(max(lons), data_4326[1][0])
        lat0, lat1 = max(min(lats), data_4326[0][1]), min(max(lats), data_4326[1][1])
        coordinates_4326 = [
            [lon0, lat0],
            [lon1, lat1],
        ]
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        coordinates_4326_backup = coordinates_4326
        position = {
            "zoom": relayout_data.get("mapbox.zoom", None),
            "center": relayout_data.get("mapbox.center", None),
        }
        position_backup = position
    else:
        # First render: default camera over the full dataset.
        position = {
            "zoom": 3.3350828189345934,
            "pitch": 0,
            "bearing": 0,
            "center": {
                "lon": -100.55828959790324,
                "lat": 38.68323453274175,
            },  # {'lon': data_center_4326[0][0]-100, 'lat': data_center_4326[0][1]-10}
        }
        coordinates_3857 = data_3857
        coordinates_4326 = data_4326
    # Four corners for the mapbox image layer (lon/lat, clockwise).
    new_coordinates = [
        [coordinates_4326[0][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[0][1]],
        [coordinates_4326[0][0], coordinates_4326[0][1]],
    ]
    x_range, y_range = zip(*coordinates_3857)
    x0, x1 = x_range
    y0, y1 = y_range
    # A box selection on the map narrows the dataset to the selected region.
    if selected_map is not None:
        coordinates_4326 = selected_map["range"]["mapbox"]
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        x_range_t, y_range_t = zip(*coordinates_3857)
        x0, x1 = x_range_t
        y0, y1 = y_range_t
        df = df.map_partitions(
            query_df_range_lat_lon, x0, x1, y0, y1, "easting", "northing"
        ).persist()
    # Select points as per view ("net" codes: -1 out, 0 stationary, 1 in)
    if (view_name == "total") | (view_name == "race"):
        df = df[(df["net"] == 0) | (df["net"] == 1)]
        # df['race'] = df['race'].astype('category')
    elif view_name == "in":
        df = df[df["net"] == 1]
        df["net"] = df["net"].astype("int8")
    elif view_name == "stationary":
        df = df[df["net"] == 0]
        df["net"] = df["net"].astype("int8")
    elif view_name == "out":
        df = df[df["net"] == -1]
        df["net"] = df["net"].astype("int8")
    else:  # net migration condition
        df = df
        # df["net"] = df["net"].astype("category")
    # Apply each histogram selection as a row filter.
    for col in selected:
        df = df.map_partitions(query_df_selected_ids, col, selected[col])
    # cull empty partitions
    df = cull_empty_partitions(df).persist()
    datashader_plot = build_datashader_plot(
        df,
        colorscale_name,
        colorscale_transform,
        new_coordinates,
        position,
        x_range,
        y_range,
        view_name,
    )
    # Build indicator figure
    n_selected_indicator = {
        "data": [
            {
                "domain": {"x": [0.21, 0.41], "y": [0, 0.5]},
                "title": {"text": "Data Size"},
                "type": "indicator",
                "value": len(df),
                "number": {
                    "font": {"color": text_color, "size": "50px"},
                    "valueformat": ",",
                    "suffix": " rows",
                },
            },
        ],
        "layout": {
            "template": template,
            "height": row_heights[3],
            # 'margin': {'l': 0, 'r': 0,'t': 5, 'b': 5}
        },
    }
    race_histogram = build_histogram_default_bins(
        df,
        "race",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="All",
    )
    county_top_histogram = build_histogram_default_bins(
        df,
        "county",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="top",
    )
    county_bottom_histogram = build_histogram_default_bins(
        df,
        "county",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="bottom",
    )
    del df
    return (
        datashader_plot,
        county_top_histogram,
        county_bottom_histogram,
        race_histogram,
        n_selected_indicator,
        coordinates_4326_backup,
        position_backup,
    )
def build_updated_figures(
    df,
    relayout_data,
    selected_map,
    selected_race,
    selected_county_top,
    selected_county_bottom,
    colorscale_name,
    data_3857,
    data_center_3857,
    data_4326,
    data_center_4326,
    coordinates_4326_backup,
    position_backup,
    view_name,
):
    """Rebuild every dashboard figure for the current viewport and selections.

    Filters ``df`` down to the visible map viewport, the map box-selection
    (``selected_map``), and any bar selections on the race / county charts,
    then regenerates the datashader map, the three histograms and the
    row-count indicator.

    Args:
        df: dataframe with easting/northing/net/race/county columns.
        relayout_data: plotly mapbox relayout event payload, or None.
        selected_map: box-select payload from the map figure, or None.
        selected_race / selected_county_top / selected_county_bottom:
            selection payloads from the three bar charts, or None.
        colorscale_name: name of the colormap used for rendering.
        data_3857 / data_4326: full dataset bounding box in web-mercator /
            lon-lat corner form.
        data_center_3857 / data_center_4326: dataset centers (kept for
            interface compatibility; not used directly here).
        coordinates_4326_backup / position_backup: previous viewport corners
            and camera position, reused when only the drag mode changed.
        view_name: one of "total", "race", "in", "stationary", "out", "net".

    Returns:
        Tuple of (map figure, race histogram, county-top histogram,
        county-bottom histogram, indicator figure, coordinates_4326_backup,
        position_backup).
    """
    colorscale_transform = "linear"

    # Column -> selected-id mapping; only charts with an active box selection
    # contribute an entry. (The previous dead `selected = {}` was removed.)
    selected = {
        col: bar_selected_ids(sel, col)
        for col, sel in zip(
            ["race", "county_top", "county_bottom"],
            [selected_race, selected_county_top, selected_county_bottom],
        )
        if sel and sel.get("points", [])
    }

    # BUGFIX: this transformer used to be created only when relayout_data was
    # not None, but epsg_4326_to_3857 is also needed in the `selected_map`
    # branch below, which raised a NameError whenever a map box selection
    # arrived without any relayout data. Define it unconditionally.
    transformer_4326_to_3857 = Transformer.from_crs("epsg:4326", "epsg:3857")

    def epsg_4326_to_3857(coords):
        # Rows are (lon, lat); pyproj expects (lat, lon) for this CRS pair.
        return [transformer_4326_to_3857.transform(*reversed(row)) for row in coords]

    coordinates_4326 = relayout_data and relayout_data.get(
        "mapbox._derived", {}
    ).get("coordinates", None)
    dragmode = (
        relayout_data
        and "dragmode" in relayout_data
        and coordinates_4326_backup is not None
    )
    if dragmode:
        # Only the drag mode changed: reuse the previous viewport unchanged.
        coordinates_4326 = coordinates_4326_backup
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        position = position_backup
    elif coordinates_4326:
        # Clamp the reported viewport corners to the dataset extent.
        lons, lats = zip(*coordinates_4326)
        lon0, lon1 = max(min(lons), data_4326[0][0]), min(max(lons), data_4326[1][0])
        lat0, lat1 = max(min(lats), data_4326[0][1]), min(max(lats), data_4326[1][1])
        coordinates_4326 = [
            [lon0, lat0],
            [lon1, lat1],
        ]
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        coordinates_4326_backup = coordinates_4326
        position = {
            "zoom": relayout_data.get("mapbox.zoom", None),
            "center": relayout_data.get("mapbox.center", None),
        }
        position_backup = position
    else:
        # No viewport information at all: fall back to the default CONUS view.
        position = {
            "zoom": 3.3350828189345934,
            "pitch": 0,
            "bearing": 0,
            "center": {
                "lon": -100.55828959790324,
                "lat": 38.68323453274175,
            },
        }
        coordinates_3857 = data_3857
        coordinates_4326 = data_4326

    # Four-corner polygon of the visible area, used as the map image extent.
    new_coordinates = [
        [coordinates_4326[0][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[1][1]],
        [coordinates_4326[1][0], coordinates_4326[0][1]],
        [coordinates_4326[0][0], coordinates_4326[0][1]],
    ]

    x_range, y_range = zip(*coordinates_3857)
    x0, x1 = x_range
    y0, y1 = y_range

    if selected_map is not None:
        # A box selection on the map overrides the viewport for filtering.
        coordinates_4326 = selected_map["range"]["mapbox"]
        coordinates_3857 = epsg_4326_to_3857(coordinates_4326)
        x_range_t, y_range_t = zip(*coordinates_3857)
        x0, x1 = x_range_t
        y0, y1 = y_range_t

    df = query_df_range_lat_lon(df, x0, x1, y0, y1, "easting", "northing")

    # Restrict rows to the requested migration view.
    # net codes: 1 = inward migration, 0 = stationary, -1 = outward migration.
    if (view_name == "total") | (view_name == "race"):
        df = df[(df["net"] == 0) | (df["net"] == 1)]
        df["net"] = df["net"].astype("int8")
    elif view_name == "in":
        df = df[df["net"] == 1]
        df["net"] = df["net"].astype("int8")
    elif view_name == "stationary":
        df = df[df["net"] == 0]
        df["net"] = df["net"].astype("int8")
    elif view_name == "out":
        df = df[df["net"] == -1]
        df["net"] = df["net"].astype("int8")
    # else: the "net" view keeps every row unchanged (old no-op `df = df`).

    # Apply the bar-chart selections accumulated above.
    for col in selected:
        df = query_df_selected_ids(df, col, selected[col])

    datashader_plot = build_datashader_plot(
        df,
        colorscale_name,
        colorscale_transform,
        new_coordinates,
        position,
        x_range,
        y_range,
        view_name,
    )

    # Indicator figure showing how many rows survived all filters.
    n_selected_indicator = {
        "data": [
            {
                "domain": {"x": [0.2, 0.45], "y": [0, 0.5]},
                "title": {"text": "Data Size"},
                "type": "indicator",
                "value": len(df),
                "number": {
                    "font": {"color": text_color, "size": "50px"},
                    "valueformat": ",",
                    "suffix": " rows",
                },
            },
        ],
        "layout": {
            "template": template,
            "height": row_heights[3],
        },
    }

    race_histogram = build_histogram_default_bins(
        df,
        "race",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="All",
    )
    county_top_histogram = build_histogram_default_bins(
        df,
        "county",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="top",
    )
    county_bottom_histogram = build_histogram_default_bins(
        df,
        "county",
        selected,
        "v",
        colorscale_name,
        colorscale_transform,
        view_name,
        flag="bottom",
    )
    del df

    return (
        datashader_plot,
        race_histogram,
        county_top_histogram,
        county_bottom_histogram,
        n_selected_indicator,
        coordinates_4326_backup,
        position_backup,
    )
def check_dataset(dataset_url, data_path):
    """Ensure the dataset exists at ``data_path``, downloading it if missing.

    Args:
        dataset_url: HTTP(S) URL to stream the dataset from.
        data_path: local file path where the dataset should live.

    Raises:
        requests.HTTPError: if the download request fails.
    """
    if os.path.exists(data_path):
        print(f"Found dataset at {data_path}")
        return

    print(
        f"Dataset not found at {data_path}.\n"
        f"Downloading from {dataset_url}"
    )
    # Create the directory that data_path actually points into; the old code
    # hard-coded "../data", which broke for any other target location.
    os.makedirs(os.path.dirname(data_path) or ".", exist_ok=True)
    # Stream the download in chunks so large files never sit fully in memory.
    with requests.get(dataset_url, stream=True) as r:
        r.raise_for_status()
        with open(data_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
    print("Download completed!")
def load_dataset(path, dtype="dask_cudf"):
    """Load the census parquet dataset as the requested dataframe flavor.

    Args:
        path: path to a parquet file, or a directory of parquet files
            (a directory is read via a ``/*`` glob).
        dtype: one of "dask", "dask_cudf", "pandas"; anything else falls
            back to a plain cuDF dataframe. "dask_cudf" additionally
            requires the dask_cudf module to be available.

    Returns:
        A dask, dask_cudf, pandas, or cuDF DataFrame depending on ``dtype``.
    """
    if os.path.isdir(path):
        # Read every parquet part inside the directory.
        path = os.path.join(path, "*")
    if dtype == "dask":
        return dd.read_parquet(path, split_row_groups=True)
    elif dask_cudf and dtype == "dask_cudf":
        return dask_cudf.read_parquet(path, split_row_groups=True)
    elif dtype == "pandas":
        # Decode on GPU, then transfer to host memory.
        return cudf.read_parquet(path).to_pandas()
    return cudf.read_parquet(path)
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/plotly_demo/assets/s1.css | /* Table of contents
––––––––––––––––––––––––––––––––––––––––––––––––––
- Plotly.js
- Grid
- Base Styles
- Typography
- Links
- Buttons
- Forms
- Lists
- Code
- Tables
- Spacing
- Utilities
- Clearing
- Media Queries
*/
/* Grid
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.container {
position: relative;
width: 100%;
max-width: 960px;
margin: 0 auto;
padding: 0 20px;
box-sizing: border-box; }
.column,
.columns {
width: 100%;
float: left;
box-sizing: border-box; }
/* For devices larger than 400px */
@media (min-width: 400px) {
.container {
width: 85%;
padding: 0; }
}
/* For devices larger than 550px */
@media (min-width: 550px) {
.container {
width: 80%;
}
.column,
.columns {
margin-left: 4%; }
.column:first-child,
.columns:first-child {
margin-left: 0; }
.one.column,
.one.columns { width: 4.66666666667%; }
.two.columns { width: 13.3333333333%; }
.three.columns { width: 22%; }
.four.columns { width: 30.6666666667%; }
.five.columns { width: 39.3333333333%; }
.six.columns { width: 48%; }
.seven.columns { width: 56.6666666667%; }
.eight.columns { width: 65.3333333333%; }
.nine.columns { width: 74.0%; }
.ten.columns { width: 82.6666666667%; }
.eleven.columns { width: 91.3333333333%; }
.twelve.columns { width: 98%; margin-left: 0; margin-right: 0;}
.one-third.column { width: 32%; margin-right: 0.5;}
.one-third.column:last-child { margin-right: 0;}
.two-thirds.column { width: 65.3333333333%; }
.one-half.column { width: 48%; }
/* Offsets */
.offset-by-one.column,
.offset-by-one.columns { margin-left: 8.66666666667%; }
.offset-by-two.column,
.offset-by-two.columns { margin-left: 17.3333333333%; }
.offset-by-three.column,
.offset-by-three.columns { margin-left: 26%; }
.offset-by-four.column,
.offset-by-four.columns { margin-left: 34.6666666667%; }
.offset-by-five.column,
.offset-by-five.columns { margin-left: 43.3333333333%; }
.offset-by-six.column,
.offset-by-six.columns { margin-left: 52%; }
.offset-by-seven.column,
.offset-by-seven.columns { margin-left: 60.6666666667%; }
.offset-by-eight.column,
.offset-by-eight.columns { margin-left: 69.3333333333%; }
.offset-by-nine.column,
.offset-by-nine.columns { margin-left: 78.0%; }
.offset-by-ten.column,
.offset-by-ten.columns { margin-left: 86.6666666667%; }
.offset-by-eleven.column,
.offset-by-eleven.columns { margin-left: 95.3333333333%; }
.offset-by-one-third.column,
.offset-by-one-third.columns { margin-left: 34.6666666667%; }
.offset-by-two-thirds.column,
.offset-by-two-thirds.columns { margin-left: 69.3333333333%; }
.offset-by-one-half.column,
.offset-by-one-half.columns { margin-left: 52%; }
}
/* Base Styles
–––––––––––––––––––––––––––––––––––––––––––––––––– */
/* NOTE
html is set to 62.5% so that all the REM measurements throughout Skeleton
are based on 10px sizing. So basically 1.5rem = 15px :) */
html {
font-size: 62.5%; }
body {
font-size: 1.5em; /* currently ems cause chrome bug misinterpreting rems on body element */
line-height: 1.6;
font-weight: 400;
font-family: "Open Sans", "HelveticaNeue", "Helvetica Neue", Helvetica, Arial, sans-serif;
color: #cfd8dc; /* Material blue-grey 100 */
background-color: #191a1a; /* Material blue-grey 900*/
margin: 2%;
}
/* Typography
–––––––––––––––––––––––––––––––––––––––––––––––––– */
h1, h2, h3, h4, h5, h6 {
margin-top: 0;
margin-bottom: 0;
font-weight: 300; }
h1 { font-size: 3.2rem; line-height: 1.2; letter-spacing: -.1rem; margin-bottom: 2rem; }
h2 { font-size: 3.0rem; line-height: 1.25; letter-spacing: -.1rem; margin-bottom: 1.8rem; margin-top: 1.8rem;}
h3 { font-size: 2.7rem; line-height: 1.3; letter-spacing: -.1rem; margin-bottom: 1.5rem; margin-top:1.5rem;}
h4 { font-size: 2.4rem; line-height: 1.35; letter-spacing: -.08rem; margin-bottom: 1.2rem; margin-top: 1.2rem;}
h5 { font-size: 2.0rem; line-height: 1.5; letter-spacing: -.05rem; margin-bottom: 0.6rem; margin-top: 0.6rem;}
h6 { font-size: 2.0rem; line-height: 1.6; letter-spacing: 0; margin-bottom: 0.75rem; margin-top: 0.75rem;}
p {
margin-top: 0; }
/* Blockquotes
–––––––––––––––––––––––––––––––––––––––––––––––––– */
blockquote {
border-left: 4px lightgrey solid;
padding-left: 1rem;
margin-top: 2rem;
margin-bottom: 2rem;
margin-left: 0rem;
}
/* Links
–––––––––––––––––––––––––––––––––––––––––––––––––– */
a {
color: #1565c0; /* Material Blue 800 */
text-decoration: underline;
cursor: pointer;}
a:hover {
color: #0d47a1; /* Material Blue 900 */
}
/* Buttons
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.button,
button,
input[type="submit"],
input[type="reset"],
input[type="button"] {
display: inline-block;
height: 38px;
padding: 0 30px;
color: #90a4ae; /* Material blue-gray 300*/
text-align: center;
font-size: 11px;
font-weight: 600;
line-height: 38px;
letter-spacing: .1rem;
text-transform: uppercase;
text-decoration: none;
white-space: nowrap;
background-color: transparent;
border-radius: 4px;
border: 1px solid #90a4ae; /* Material blue-gray 300*/
cursor: pointer;
box-sizing: border-box; }
.button:hover,
button:hover,
input[type="submit"]:hover,
input[type="reset"]:hover,
input[type="button"]:hover,
.button:focus,
button:focus,
input[type="submit"]:focus,
input[type="reset"]:focus,
input[type="button"]:focus {
color: #cfd8dc;
border-color: #cfd8dc;
outline: 0; }
.button.button-primary,
button.button-primary,
input[type="submit"].button-primary,
input[type="reset"].button-primary,
input[type="button"].button-primary {
color: #FFF;
background-color: #33C3F0;
border-color: #33C3F0; }
.button.button-primary:hover,
button.button-primary:hover,
input[type="submit"].button-primary:hover,
input[type="reset"].button-primary:hover,
input[type="button"].button-primary:hover,
.button.button-primary:focus,
button.button-primary:focus,
input[type="submit"].button-primary:focus,
input[type="reset"].button-primary:focus,
input[type="button"].button-primary:focus {
color: #FFF;
background-color: #1EAEDB;
border-color: #1EAEDB; }
/* Forms
–––––––––––––––––––––––––––––––––––––––––––––––––– */
input[type="email"],
input[type="number"],
input[type="search"],
input[type="text"],
input[type="tel"],
input[type="url"],
input[type="password"],
textarea,
select {
height: 38px;
padding: 6px 10px; /* The 6px vertically centers text on FF, ignored by Webkit */
background-color: #fff;
border: 1px solid #D1D1D1;
border-radius: 4px;
box-shadow: none;
box-sizing: border-box;
font-family: inherit;
font-size: inherit; /*https://stackoverflow.com/questions/6080413/why-doesnt-input-inherit-the-font-from-body*/}
/* Removes awkward default styles on some inputs for iOS */
input[type="email"],
input[type="number"],
input[type="search"],
input[type="text"],
input[type="tel"],
input[type="url"],
input[type="password"],
textarea {
-webkit-appearance: none;
-moz-appearance: none;
appearance: none; }
textarea {
min-height: 65px;
padding-top: 6px;
padding-bottom: 6px; }
input[type="email"]:focus,
input[type="number"]:focus,
input[type="search"]:focus,
input[type="text"]:focus,
input[type="tel"]:focus,
input[type="url"]:focus,
input[type="password"]:focus,
textarea:focus,
select:focus {
border: 1px solid #33C3F0;
outline: 0; }
label,
legend {
display: block;
margin-bottom: 0px; }
fieldset {
padding: 0;
border-width: 0; }
input[type="checkbox"],
input[type="radio"] {
display: inline; }
label > .label-body {
display: inline-block;
margin-left: .5rem;
font-weight: normal; }
/* Lists
–––––––––––––––––––––––––––––––––––––––––––––––––– */
ul {
list-style: circle inside; }
ol {
list-style: decimal inside; }
ol, ul {
padding-left: 0;
margin-top: 0; }
ul ul,
ul ol,
ol ol,
ol ul {
margin: 1.5rem 0 1.5rem 3rem;
font-size: 90%; }
li {
margin-bottom: 0;
}
/* Tables
–––––––––––––––––––––––––––––––––––––––––––––––––– */
table {
border-collapse: collapse;
}
th:not(.CalendarDay),
td:not(.CalendarDay) {
padding: 4px 10px;
text-align: left;
/*border-bottom: 1px solid #E1E1E1;*/
}
th:first-child:not(.CalendarDay),
td:first-child:not(.CalendarDay) {
padding-left: 0; }
/* The td: half of this rule was commented out but the th: selector was left
   dangling; the stray selector then silently merged into the next rule in
   the sheet. Comment out the whole rule to keep it inert. */
/*th:last-child:not(.CalendarDay),*/
/*td:last-child:not(.CalendarDay) {*/
/*  padding-right: 0; }*/
/* Spacing
–––––––––––––––––––––––––––––––––––––––––––––––––– */
button,
.button {
margin-bottom: 0rem; }
input,
textarea,
select,
fieldset {
margin-bottom: 0rem; }
pre,
dl,
figure,
table,
form {
margin-bottom: 0rem; }
p,
ul,
ol {
margin-bottom: 0.75rem; }
/* Utilities
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.u-full-width {
width: 100%;
box-sizing: border-box; }
.u-max-full-width {
max-width: 100%;
box-sizing: border-box; }
.u-pull-right {
float: right; }
.u-pull-left {
float: left; }
/* Misc
–––––––––––––––––––––––––––––––––––––––––––––––––– */
hr {
margin-top: 3rem;
margin-bottom: 3.5rem;
border-width: 0;
border-top: 1px solid #E1E1E1; }
/* Clearing
–––––––––––––––––––––––––––––––––––––––––––––––––– */
/* Self Clearing Goodness */
.container:after,
.row:after,
.u-cf {
content: "";
display: table;
clear: both; }
/* Media Queries
–––––––––––––––––––––––––––––––––––––––––––––––––– */
/*
Note: The best way to structure the use of media queries is to create the queries
near the relevant code. For example, if you wanted to change the styles for buttons
on small devices, paste the mobile query code up in the buttons section and style it
there.
*/
/* Larger than mobile */
@media (min-width: 400px) {}
/* Larger than phablet (also point when grid becomes active) */
@media (min-width: 550px) {}
/* Larger than tablet */
@media (min-width: 750px) {}
/* Larger than desktop */
@media (min-width: 1000px) {}
/* Larger than Desktop HD */
@media (min-width: 1200px) {}
/* Pretty container
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.pretty_container {
border-radius: 10px;
background-color: #000000; /* Mapbox light map land color */
margin: 0.5%;
margin-left: 1.5%;
padding: 1%;
position: relative;
box-shadow: 1px 1px 1px black;
}
.container_title {
margin-top: 0;
margin-bottom: 0.2em;
font-size: 2.6rem;
line-height: 2.6rem;
}
/* Special purpose buttons
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.reset-button {
/* width: 100%; */
/* margin-top: 10px; */
margin-top: -5px;
height: 30px;
line-height: 30px;
float: right;
}
.info-icon {
float: right;
cursor: pointer;
}
/* Modal info layer
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.modal {
position: fixed;
z-index: 1002; /* Sit on top, including modebar which has z=1001 */
left: 0;
top: 0;
width: 100%; /* Full width */
height: 100%; /* Full height */
background-color: rgba(0, 0, 0, 0.6); /* Black w/ opacity */
}
.modal-content {
z-index: 1004; /* Sit on top, including modebar which has z=1001 */
position: fixed;
left: 0;
width: 60%;
background-color: #3949ab; /* Material indigo 600 */
color: white;
border-radius: 5px;
margin-left: 20%;
margin-bottom: 2%;
margin-top: 2%;
}
.modal-content > div {
text-align: left;
margin: 15px;
}
.modal-content.bottom {
bottom: 0;
}
.modal-content.top {
top: 0;
}
/* Config pane
–––––––––––––––––––––––––––––––––––––––––––––––––– */
.config-label {
text-align: right !important;
}
.VirtualizedSelectOption {
color: #191a1a;
} | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/holoviews_demo/README.md | # Panel + Holoviews + RAPIDS | Census 2020 Race Migration Visualization

## Charts
1. Map chart shows the total migration points for chosen view and selected area
2. Top counties bar show the counties with most migration for chosen view and selected area
3. Net Race migration bar shows total inward and outward migration for chosen view and selected area
4. Population Distribution shows distribution of migration across blocks for chosen view and selected area
Cross-filtering is enabled to link all the four charts using box-select tool
## Race Views
The demo consists of eight views ( seven race views + one all-race view)
Options - All, White alone, African American alone, American Indian alone, Asian alone, Native Hawaiian alone, Other Race alone, Two or More races.
#### Snapshot examples
1. White race

2. Asian race

3. African american race

## Colormaps
User can select from select colormaps
Options - 'kbc', 'fire', 'bgy', 'bgyw', 'bmy', 'gray'.
## Limit
User can use slider to select how many top counties to show, from 5 to 50 at intervals of 5
# Installation and Run Steps
## Data
There is 1 main dataset:
- Net Migration Dataset ; Consists of Race Migration computed using Census 2020 and Census 2010 block data
For more information on how the Net Migration Dataset was prepared to show individual points, refer to the `/data_prep_net_migration` folder.
You can download the final net migration dataset [here](https://data.rapids.ai/viz-data/net_migration_dataset.parquet)
### Conda Env
Verify the following arguments in the `environment.yml` match your system(easy way to check `nvidia-smi`):
cudatoolkit: Version used is `11.5`
```bash
# setup conda environment
conda env create --name holoviews_env --file environment.yml
source activate holoviews_env
# run and access
cd holoviews_demo
jupyter lab
run `census_net_migration_demo.ipynb` notebook
```
## Dependencies
- python=3.9
- cudatoolkit=11.5
- rapids=22.08
- plotly=5.10.0
- jupyterlab=3.4.3
## FAQ and Known Issues
**What hardware do I need to run this locally?** To run you need an NVIDIA GPU with at least 24GB of memory, at least 32GB of system memory, and a Linux OS as defined in the [RAPIDS requirements](https://rapids.ai/start.html#req).
**How did you compute migration?** Migration was computed by comparing the block level population for census 2010 and 2020
**How did you compare population having block level boundary changes?** [Relationship Files](https://www.census.gov/geographies/reference-files/time-series/geo/relationship-files.html#t10t20) provides the 2010 Census Tabulation Block to 2020 Census Tabulation Block Relationship Files. Block relationships may be one-to-one, many-to-one, one-to-many, or many-to-many. Population count was computed in proportion to take into account the division and collation of blocks across 2010 and 2020.
**How did you determine race migration?** We took difference of race counts for census 2020 and census 2010. Individuals were randomly assigned race within a block so that they accurately add up at the block level.
**How did you get individual point locations?** The population density points are randomly placed within a census block and associated to match distribution counts at a census block level.
**How are the population and distributions filtered?** Use the box select tool icon for the map or click and drag for the bar charts.
**Why is the population data from 2010 and 2020?** Only census data is recorded on a block level, which provides the highest resolution population distributions available. For more details on census boundaries refer to the [TIGERweb app](https://tigerweb.geo.census.gov/tigerwebmain/TIGERweb_apps.html).
**The dashboard stops responding or the chart data disappeared!** This is likely caused by an Out of Memory Error, and the application must be restarted.
**How do I request a feature or report a bug?** Create an [Issue](https://github.com/rapidsai/plotly-dash-rapids-census-demo/issues) and we will get to it asap.
## Acknowledgments and Data Sources
- 2020 Population Census and 2010 Population Census to compute Net Migration Dataset, used with permission from IPUMS NHGIS, University of Minnesota, [www.nhgis.org](https://www.nhgis.org/) ( not for redistribution ).
- Dashboard developed with [Panel](https://panel.holoviz.org/) and [Holoviews](https://holoviews.org/index.html)
- Geospatial point rendering developed with [Datashader](https://datashader.org/).
- GPU acceleration with [RAPIDS cudf](https://rapids.ai/) and [cupy](https://cupy.chainer.org/), CPU code with [pandas](https://pandas.pydata.org/).
- For source code and data workflow, visit our [GitHub](https://github.com/rapidsai/plotly-dash-rapids-census-demo/tree/census-2020).
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/holoviews_demo/census_net_migration_demo.ipynb | import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "6"import pandas as pd, numpy as np
import dash
# from dash import html
from plotly.colors import sequential
from holoviews.element.tiles import CartoDark
import cudf, cupy
import sys,os,datetime,random
import geopandas as gpd
from shapely.geometry import Point
# pd.set_option('display.float_format','{:.0f}'.format)
from dash import Dash, dcc, html
import numpy as np
import pandas as pd
from dash.dependencies import Input, Output, State
# import plotly.express as px
# import plotly.graph_objects as go
import holoviews as hv
from holoviews.plotting.plotly.dash import to_dash
from holoviews.operation.datashader import datashade,rasterize,dynspread,shade
from holoviews import streams
import datashader.transfer_functions as tf
import datashader as ds
from pyproj import Transformer
from holoviews import streams
from holoviews import opts
from holoviews.streams import Stream, param, LinkedStream
import panel as pn
import dask_cudf
import colorcet as cc
import pickledata_path = '../data/net_migration_dataset.parquet'df = cudf.read_parquet(data_path).reset_index(drop=True)
df.head()id2county = pickle.load(open('../id2county.pkl','rb'))
county2id = { v:k for k,v in id2county.items()}# Prepare net_migration_dataset from census_migration_dataset
# df = cudf.from_pandas(df)
# df["easting"], df["northing"] = hv.Tiles.lon_lat_to_easting_northing(df["x"], df["y"])
# df.drop(columns=['x','y'],inplace=True)
# Data reduction
# df.drop(columns=['blockid'],inplace=True)
# df['race'] = df['race'].astype('int8')
# df['block_net'] = df['block_net'].astype('int8')
# df['county'] = df['county'].apply(lambda x: county2id[x])
# df['county'] = df['county'].astype('uint16')
# df[['easting','northing']] = df[['easting','northing']].astype('float32')
# df.head()


def race_bar(ds, selected_race):
    """Bar chart of inward vs outward migration counts.

    Uses the module-level ``filtered_df`` (updated by filter_dataset), either
    across all races (``selected_race == 0``) or for a single race code.
    Negative race codes mark outward migration, positive inward.

    NOTE(review): the original flattened source fused this def onto a
    commented-out line, leaving its body orphaned; reconstructed here.
    """
    if selected_race == 0:
        agg_df = filtered_df[filtered_df.block_net != 0].groupby('race')['block_net'].count().sort_values(ascending=False).reset_index()
        agg_df['labels'] = agg_df['race'].to_pandas().apply(lambda x: 'Out' if x < 0 else 'In')
        kdim_label = hv.Dimension('labels', values=agg_df.to_pandas()['labels'].tolist())
        return hv.Bars(data=agg_df, kdims=kdim_label, vdims=['block_net'])
    else:
        agg_df = filtered_df[filtered_df.race.abs() == selected_race].groupby('race')['block_net'].count().sort_values(ascending=False).reset_index()
        agg_df['labels'] = agg_df['race'].to_pandas().apply(lambda x: 'Out' if x < 0 else 'In')
        kdim_label = hv.Dimension('labels', values=agg_df.to_pandas()['labels'].tolist())
        return hv.Bars(data=agg_df, kdims=kdim_label, vdims=['block_net'])


# Earlier variant kept for reference (counts 'blockid' per net/race bucket):
# def race_bar(ds,selected_race): #
#     if selected_race==0:
#         agg_df =filtered_df[filtered_df.block_net!=0].groupby('block_net')['blockid'].count().reset_index()
#         labels = hv.Dimension('block_net', values = ['Out','In'])
#         return hv.Bars(data=agg_df,kdims=['block_net'],vdims=['blockid'])
#     else:
#         agg_df =filtered_df[filtered_df.race.abs()==selected_race].groupby('race')['blockid'].count().reset_index()
#         labels = hv.Dimension('race', values = ['Out','In'])
#         return hv.Bars(data=agg_df,kdims=['race'],vdims=['blockid'])
def bars(ds,limit,selected_race):
    """Bar chart of the `limit` counties with the most migration points,
    optionally restricted to one race code (0 means all races).

    NOTE(review): reads the module-level `filtered_df` rather than `ds.data`
    — presumably intentional since filter_dataset keeps it in sync; verify.
    """
    if selected_race==0:
        # Count migration rows per county, largest first, keep the top `limit`.
        agg_df = filtered_df.groupby('county')['block_net'].count().sort_values(ascending=False).reset_index()[:limit]
        # Map numeric county ids back to names for the axis labels.
        agg_df['labels'] = agg_df['county'].to_pandas().apply(lambda x: id2county[x])
        kdim_county = hv.Dimension('labels', values = agg_df.to_pandas()['labels'].tolist()[:limit])
        return hv.Bars(data=agg_df,kdims=kdim_county,vdims=['block_net'])#.opts(color='x',cmap=cmap_selector.value)
    else:
        # Same aggregation, restricted to one race (codes are signed; use abs).
        agg_df = filtered_df[filtered_df.race.abs()==selected_race].groupby('county')['block_net'].count().sort_values(ascending=False).reset_index()[:limit]
        agg_df['labels'] = agg_df['county'].to_pandas().apply(lambda x: id2county[x])
        # print(agg_df.head())
        kdim_county = hv.Dimension('labels', values = agg_df.to_pandas()['labels'].tolist()[:limit])
        return hv.Bars(data=agg_df,kdims=kdim_county,vdims=['block_net'])
def filter_dataset(bounds,boundsx,b2):
    """Cross-filter callback: narrow the global `filtered_df` by the latest
    map box-selection (`bounds`), histogram x-range (`boundsx`) and county-bar
    index range (`b2`), returning the result as a holoviews Dataset.

    NOTE(review): filtering is cumulative — `filtered_df` is module-level
    state that is only ever narrowed here; resetting it requires re-running
    the setup cell. Each branch fires only when its stream value changed.
    """
    # print("Entered data filter")
    # print(bounds,boundsx,b2)
    global bounds_prev
    global boundsx_prev
    global b2_prev
    global filtered_df
    global c
    c+=1  # call counter, kept for debugging
    # Map box-selection: bounds is (x0, y0, x1, y1) in web-mercator.
    if (bounds_prev!=bounds) & (bounds is not None):
        bounds_prev = bounds
        x0,y0,x1,y1 = bounds
        # y0 is the top edge and y1 the bottom, hence northing<=y0 and >=y1.
        filtered_df = filtered_df[(filtered_df.easting>=x0) & (filtered_df.easting<=x1) & (filtered_df.northing<=y0) & (filtered_df.northing>=y1)]
    # Histogram x-range selection on block-level population difference.
    if (boundsx_prev!=boundsx) & (boundsx is not None):
        boundsx_prev = boundsx
        x_min, x_max = boundsx
        filtered_df = filtered_df[(filtered_df.block_diff>=x_min) & (filtered_df.block_diff<=x_max)]
    # County-bar selection: b2 is an x-range over bar positions of the
    # migration-count-ordered county bar chart; round to bar indices.
    if (b2_prev!=b2) & (b2 is not None) :
        b2_prev = b2
        agg_df = filtered_df.groupby('county')['block_net'].count().sort_values(ascending=False).reset_index()
        id_min, id_max = b2
        id_min,id_max = max(0,round(id_min)), round(id_max)
        # print(id_min,id_max+1)
        agg_df = agg_df.iloc[id_min:id_max+1]
        # print(selected_bar)
        selected_counties = agg_df.to_pandas().county.tolist()
        filtered_df = filtered_df[filtered_df['county'].isin(selected_counties)] # filter dataset to keep selected counties
    return hv.Dataset(filtered_df)
def scatter(ds,selected_race):
    """Build the map's point layer from the filtered dataset, optionally
    restricted to one race code (0 means all races)."""
    # print("Entered scatter")
    # print(len(ds))
    # print(view_selection)
    filtered_df = ds.data
    if len(filtered_df)==0:
        # Nothing left after filtering: plot a single dummy point from the
        # full dataset so holoviews still renders a figure instead of erroring.
        return hv.Points(df[:1], ["easting", "northing"],vdims='block_net').options(color='block_net') # Empty graph
    if selected_race == 0:
        return hv.Points(filtered_df, ["easting", "northing"],vdims='block_net').options(color='block_net')
    # Race codes are signed (negative = outward migration); match on abs value.
    else: return hv.Points(filtered_df[filtered_df.race.abs()==selected_race], ["easting", "northing"],vdims='block_net').options(color='block_net')
def histogram(ds,selected_race):
    """100-bin histogram of block-level population change (`block_diff`) for
    the filtered dataset, optionally restricted to one race code (0 = all)."""
    # print("Entered histogram")
    # print(len(ds))
    filtered_df = ds.data
    if selected_race==0:
        # Bin on host memory; np.histogram needs a numpy array.
        return hv.Histogram(np.histogram(filtered_df['block_diff'].to_pandas().values,bins=100))
    else:
        return hv.Histogram(np.histogram(filtered_df[filtered_df.race.abs()==selected_race]['block_diff'].to_pandas().values,bins=100))
def set_dragmode(plot, element):
fig = plot.state
fig['layout']['dragmode'] = "select"
if isinstance(element, hv.Histogram):
# Constrain histogram selection direction to horizontal
fig['layout']['selectdirection'] = "h"
fig['layout']['uirevision'] =False
bounds_prev = None
boundsx_prev = None
b2_prev = None
c= 0
filtered_df = df.copy()
tiles = CartoDark()
agg_df = filtered_df.groupby('county')['block_net'].count().sort_values(ascending=False).reset_index()
map_stream_box = streams.BoundsXY() # stream1
hist_stream_bx = streams.BoundsX()# stream 2
bar_stream_sel = streams.BoundsX().rename(boundsx='b2') # stream3
#widgets area
number = hv.Dimension('limit',values=[1,2,3,4])
dropdown = hv.Dimension('Order',values=['Top','Bottom'])
cmaps = {n: cc.palette[n] for n in ['kbc', 'fire', 'bgy', 'bgyw', 'bmy', 'gray', 'kbc']}
cmap_selector = pn.widgets.Select(name='Colormap', options=cmaps)
race_options = {'All':0,'White':1, 'African American':2,'American Indian and Alaska Native':3,'Asian':4,'Native Hawaiian and Other Pacific Islander':5,'Some Other Race':6,'Two or More Races':7 }
race_selector = pn.widgets.Select(name='Race', options=race_options)
filtered_ds = hv.DynamicMap(filter_dataset,streams=[map_stream_box,hist_stream_bx,bar_stream_sel])#.redim.range(limit=(3,7)) #kdims=[number]
# filtered_ds = hv.DynamicMap(filter_dataset,kdims=number,streams=[map_stream_box,hist_stream_bx,bar_stream_sel])
dynamic_points = filtered_ds.apply(scatter,selected_race=race_selector)
dynamic_img = datashade(dynamic_points,cmap=cmap_selector)
dynamic_histogram = filtered_ds.apply(histogram,selected_race=race_selector)
limit_slider = pn.widgets.IntSlider(name='Limit', value=10, start=5, end=50)
dynamic_bar = filtered_ds.apply(bars,limit=limit_slider,selected_race=race_selector)#.redim.range(time=(3,7))
dynamic_race = filtered_ds.apply(race_bar,selected_race=race_selector) #
dynamic_img.opts(opts.Scatter(hooks=[set_dragmode]))
dynamic_histogram.opts(opts.Histogram(hooks=[set_dragmode]))
dynamic_bar.opts(opts.Bars(hooks=[set_dragmode]))
dynamic_race.opts(opts.Bars(hooks=[set_dragmode]))
map_stream_box.source = dynamic_img
hist_stream_bx.source = dynamic_histogram
bar_stream_sel.source = dynamic_bar
# print(filtered_ds)
# print(dynamic_bar)
# map_overlay,hist_overlay,bar_overlay = (tiles*dynamic_points*img), (hist_base*dynamic_histogram), dynamic_bar #(agg_hist_base*dynamic_agg_histogram)
# print(overlay)
map_overlay,hist_overlay,bar_overlay,race_overlay = tiles*dynamic_img, dynamic_histogram , dynamic_bar,dynamic_race
# layout1 = (map_overlay.opts(height=600,width=800),hist_overlay + bar_overlay)
panel_dashboard = pn.Column(
pn.Row(pn.layout.Spacer(width=30),cmap_selector,race_selector),
pn.Row(map_overlay.opts(width=2000,height = 700)),
pn.Row(pn.layout.Spacer(width=30),limit_slider),
pn.Row(dynamic_bar.opts(width=2000//3),race_overlay.opts(width=2000//3),dynamic_histogram.opts(width =2000//3))
) #,race_overlay.opts(width=1000)panel_dashboard.servable(title='Census Migration Demo') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/holoviews_demo/environment.yml | channels:
- rapidsai
- conda-forge
- nvidia
dependencies:
- python=3.9
- cudatoolkit=11.5
- rapids=22.08
- plotly=5.10.0
- jupyterlab=3.4.3
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/README.md | # Total population dataset generation
## Order of execution
1. map_blocks_and_calc_population
2. gen_table_with_migration
3. gen_total_population_points_script
4. add_race_net_county_to_population
## Mappings:
### Net
<b>1</b>: Inward Migration</br>
<b>0</b>: Stationary</br>
<b>-1</b>: Outward Migration</br>
### Race
<b>0</b>: All</br>
<b>1</b>: White</br>
<b>2</b>: African American</br>
<b>3</b>: American Indian</br>
<b>4</b>: Asian alone</br>
<b>5</b>: Native Hawaiian</br>
<b>6</b>: Other Race alone</br>
<b>7</b>: Two or More</br>
### County
Mappings for counties can be found in `id2county.pkl` file from root directory.
### Final Dataset
You can download the final total population dataset [here](https://data.rapids.ai/viz-data/total_population_dataset.parquet)
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/gen_total_population_points_script.ipynb | import pandas as pd
import geopandas as gpd
import ast,os,random
pd.set_option('display.float_format','{:.1f}'.format)
import warnings
warnings.filterwarnings('ignore')
import cudf, cupy as cp
import numpy as np
import time
import math
import sys,os,datetime,random
from shapely.geometry import Point
# pd.set_option('display.max_colwidth', -1)df = cudf.read_csv('data/total_population_gen_df.csv').drop('Unnamed: 0',axis=1)
df.head()# df = df[df.STATE==6]
# len(df)//3
# df= df.iloc[:len(df)//3]print(len(df))def random_points_in_polygon(number, polygon):
# print(polygon)
points_x = np.array([])
points_y = np.array([])
min_x, min_y, max_x, max_y = polygon.bounds
i= 0
while i < number:
point_x = random.uniform(min_x, max_x)
point_y = random.uniform(min_y, max_y)
if polygon.contains(Point(point_x, point_y)):
points_x = np.append(points_x, point_x)
points_y = np.append(points_y, point_y)
i += 1
return points_x, points_y # returns list of points(lat), list of points(long)
def generate_data(state, df_temp, gpdf):
t1 = datetime.datetime.now()
geoid_index_df = df_temp.index.to_numpy()
final_points_x = np.array([])
final_points_y = np.array([])
geoid = np.array([])
# Add additional features
county = np.array([])
p_delta = np.array([])
p_net = np.array([])
f=0
for index, row in gpdf.iterrows():
f+=1
points_x = np.array([])
points_y = np.array([])
geoid_temp = np.array([])
if row['GEOID20'] in geoid_index_df:
num_points = df_temp.loc[row['GEOID20']]
polygon = row['geometry']
#print(row['GEOID10'])
#print('SUCCESS')
num_points = df_temp.loc[row['GEOID20']] # store population
polygon = row['geometry']
if polygon is not None:
points_x, points_y = random_points_in_polygon(num_points, polygon)
# print(points_x,points_y)
geoid_temp = np.array([row['GEOID20']]*len(points_x))
geoid = np.append(geoid,geoid_temp)
final_points_x = np.append(final_points_x, points_x)
# print(final_points_x)
final_points_y = np.append(final_points_y, points_y)
print('Processing '+str(state)+' - Completed:', "{0:0.2f}".format((index/len(gpdf))*100), '%', end='')
print('', end='\r')
# if f==11:
# break
print('Processing for '+str(state)+' complete \n total time', datetime.datetime.now() - t1)
df_fin = cudf.DataFrame({'GEOID20': geoid,'x': final_points_x, 'y':final_points_y}) #,'COUNTY':county,'p_delta':p_delta,'p_net':p_net})
df_fin.GEOID20 = df_fin.GEOID20[1:].astype('int').astype('str')
df_fin.GEOID20 = df_fin.GEOID20.fillna(method='bfill')
df_fin.to_csv('data/total_population/population_%s_1'%str(state)+'.csv', index=False)
def exec_data(state_key_list):
c=0
for i in state_key_list:
print(i)
c+=1
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
# path = 'census_2020_data/nhgis0003_shape/nhgis0003_shapefile_tl2020_%s0_block_2020/%s_block_2020.shp'%(i_str,states[i])
path ='data/tl_shapefiles/tl_2021_%s_tabblock20.shp'%(i_str)
#print(path)
print("started reading shape file for state ", states[i])
if os.path.isfile(path):
gpdf = gpd.read_file(path)[['GEOID20', 'geometry']].sort_values('GEOID20').reset_index(drop=True)
gpdf.GEOID20 = gpdf.GEOID20.astype('int64')
gpdf = gpdf[(gpdf.GEOID20>=480019501001000) & (gpdf.GEOID20<=481439502032029)].reset_index(drop=True)
print("completed reading shape file for state ", states[i])
df_temp = df.query('STATE == @i')[['ID20', 'points']]
df_temp.index = df_temp.ID20
df_temp = df_temp['points']
# print(gpdf.head(3))
# print(df_temp)
print("starting to generate data for "+str(states[i])+"... ")
generate_data(states[i], df_temp, gpdf)
del(df_temp)
else:
print("shape file does not exist")
continue
# if c==2:
# break # states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
# 16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
# 28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
# 40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
# 54:"WV",55:"WI",56:"WY",72:"PR"}
# states = {6:"CA"}exec_data(states.keys())def merge_parts(state_key_list):
concat_states = cudf.DataFrame()
c=0
for i in state_key_list:
for c in range(1,4):
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/total_population/population_%s_%s'%(str(states[i]),c)+'.csv'
# print(path)
if os.path.isfile(path):
temp = cudf.read_csv(path) # Load shape files
concat_states = cudf.concat([concat_states,temp])
else:
print("population file does not exist")
continue
return concat_statesconcat_parts = merge_parts(states)concat_parts =concat_parts.reset_index(drop=True)
concat_partsdf[df.STATE==48].points.sum()df.points.sum()concat_parts.to_pandas().to_csv('data/total_population/population_CA')def merge_shape_and_states(state_key_list):
concat_states = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/total_population/population_%s'%str(states[i])+'.csv'
if os.path.isfile(path):
temp = cudf.read_csv(path) # Load shape files
concat_states = cudf.concat([concat_states,temp])
else:
print(i)
print("population file does not exist")
continue
print(i)
return concat_states# states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
# 16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
# 28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
# 40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
# 54:"WV",55:"WI",56:"WY",72:"PR"}
states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
28:"MS"} # part1
states = {29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
54:"WV",55:"WI",56:"WY",72:"PR"} #part2indv_df = merge_shape_and_states(states.keys()).drop('Unnamed: 0',axis=1)
indv_df.rename(columns={'GEOID20':'ID20'},inplace=True)
indv_df.head()len(indv_df)# indv_df.to_pandas().to_parquet('data/total_part1.parquet')# indv_df.to_pandas().to_parquet('data/total_part2.parquet')# df1 = pd.read_parquet('data/total_part1.parquet')
# df2 = pd.read_parquet('data/total_part2.parquet')# merged = pd.concat([df1,df2])# len(merged)# gpu = cudf.from_pandas(merged)# merged.to_parquet('data/total_parts_combined.parquet')# dataset = indv_df.merge(df,on='ID20',how='left').sort_values('ID20')
# dataset.head() | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/map_blocks_and_calc_population.ipynb | import pandas as pd
import geopandas as gpd
import ast,os,random
pd.set_option('display.float_format','{:.1f}'.format)
import warnings
warnings.filterwarnings('ignore')
import cudf, cupy as cp
import numpy as np
import time
# pd.set_option('display.max_colwidth', -1)full_2020_path='data/nhgis0007_csv/nhgis0007_ds248_2020_block.csv'
full_2010_path='data/nhgis0001_csv/nhgis0001_ds172_2010_block.csv'full = cudf.read_csv(full_2010_path, usecols=['GISJOIN', 'H7V001' ,'STATEA'],dtype={'GISJOIN':'str','H7V001':'str','STATEA':'int'})
# full.STATEA = full.STATEA.astype('int')# small = full[ (full.STATEA ==11) | (full.STATEA ==10)]
# data10 = small.copy()
data10 = full.copy()
del(full)data10.GISJOIN = data10.GISJOIN.str.replace('G', '')
data10.rename(columns={'H7V001':'P10','STATEA':'STATE'},inplace=True)
data10.head()len(data10)data10['ID10'] = (data10.GISJOIN.str.slice(start=0,stop=2) + data10.GISJOIN.str.slice(start=3,stop=6) + data10.GISJOIN.str.slice(start=7)).astype('int64')data10.head()full = cudf.read_csv(full_2020_path,usecols=['GEOCODE','GISJOIN','STATEA','COUNTY','U7B001'],dtype={'GEOCODE':'int64','COUNTY':'str','GISJOIN':'str','U7B001':'int32','STATEA':'int32'})
# full.STATEA = full.STATEA.astype('int')
# small = full[ (full.STATEA ==11) | (full.STATEA ==10)]
# data20 = cudf.from_pandas(small)
data20 = full.copy()
del(full)data20[data20.COUNTY=='Montgomery County'].STATEA.value_counts()# df = small.copy()
data20.rename(columns={'U7B001':'P20','GEOCODE':'ID20','STATEA':'STATE'},inplace=True)
data20.GISJOIN = data20.GISJOIN.str.replace('G', '').astype('int64')
data20.head()len(data20)states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
54:"WV",55:"WI",56:"WY",72:"PR"}
# states = {11:"DC",10:"DE"}
def concat_states_mapper(state_key_list):
concat_mapper = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/block_rel_files/tab2010_tab2020_st%s_%s.csv'%(i_str,states[i].lower())
if os.path.isfile(path):
mapper = cudf.read_csv(path,delimiter='|')
concat_mapper = cudf.concat([concat_mapper,mapper])
else:
print("mapper file does not exist")
continue
return concat_mappermapper_df = concat_states_mapper(states.keys())
mapper_df.rename(columns={'STATE_2010':'STATE_2010'},inplace=True)
mapper_df.head(2)mapper_df['ID10'] = (mapper_df.STATE_2010.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2010.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2010.astype('str').str.rjust(6,'0') + mapper_df.BLK_2010.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df['ID20'] = (mapper_df.STATE_2020.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2020.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2020.astype('str').str.rjust(6,'0') + mapper_df.BLK_2020.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df = mapper_df[['ID10','ID20','STATE_2020']].reset_index()
mapper_df.head()freq_df = mapper_df.ID10.value_counts().reset_index().sort_values('index')
freq_df.rename(columns={'ID10':'freq'},inplace=True)
freq_df.rename(columns={'index':'ID10'},inplace=True)
freq_df = freq_df.reset_index(drop=True)
data10.P10 = data10.P10.astype('float32')
freq_df = cudf.merge(freq_df,data10[['ID10','P10']],on='ID10',how='right').sort_values('ID10')
freq_df['weights'] = freq_df['P10'].divide(freq_df['freq'])
freq_df.head()weighted_mapper = cudf.merge(mapper_df,freq_df[['ID10','weights']],on='ID10',how='left').sort_values('ID20').reset_index(drop=True)eq_10 = weighted_mapper.groupby('ID20')['weights'].sum().reset_index().sort_values('ID20').reset_index(drop=True)
eq_10.head()weighted_mapper['eq_P10'] = eq_10['weights']
weighted_mapper.head()data20['eq_P10'] = weighted_mapper['eq_P10'].copy()
data20.head()# data20.to_csv('data/mapped_blocks_full.csv') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/gen_table_with_migration.ipynb | import pandas as pd
import geopandas as gpd
import ast,os,random
pd.set_option('display.float_format','{:.1f}'.format)
import warnings
warnings.filterwarnings('ignore')
import cudf, cupy as cp
import numpy as np
import time
import math
import pickle
# pd.set_option('display.max_colwidth', -1)df = pd.read_csv('data/mapped_blocks_full.csv',encoding='unicode_escape',usecols=['ID20','STATE','COUNTY','P20','eq_P10'])df.P20.sum()df.COUNTY.replace({r'[^\x00-\x7F]+':''},regex=True,inplace=True)
df.COUNTY.replace({r'([A-Z][a-z]+)([A-Z]+)':r'\1'},regex=True,inplace=True)len(df)df['block_diff'] = df['P20'] - df['eq_P10']
df['block_diff'] = df['block_diff'].round()
df['block_net'] = df['block_diff'].apply(lambda x: 1 if x>0 else ( -1 if x<0 else 0))
df.head()df['eq_P10'] = df['eq_P10'].round()
df['error'] = (df['P20']-df['eq_P10']) - df['block_diff']
df.head()df['eq_P10'] = df['eq_P10'] + df['error']
df[(df['P20']-df['eq_P10'])!=(df['block_diff'])]df[['ID20','COUNTY','P20','eq_P10','block_diff','block_net']].to_parquet('data/total_attr_gen_df.parquet') #save attributes to be added laterdf = pd.read_parquet('data/total_attr_gen_df.parquet')
df.head()def calculate_points(row):
net = row[-1]
p20 = row[0]
p10 = row[1]
if net < 0:
return p20 + p10
else: return p20df['points'] = df[['P20','eq_P10','block_net']].apply(calculate_points,axis=1)dfcounty2id = pickle.load(open('county2id.pkl','rb'))df.COUNTY.value_counts().head(10)df = df[df.points!=0].reset_index(drop=True)df[df['COUNTY'] == 'Maricopa County'].points.sum()df['points'] = df['points'].astype('int32')counties = df[['COUNTY','points']].apply(lambda row: [county2id[row[0]]]*row[1],axis=1)gcounties = cudf.from_pandas(counties)counties_list = gcounties.explode().reset_index(drop=True)pickle.dump(counties_list,open('county_list.pkl','wb'))len(counties_list)print(len(df))df =df[df.points!=0]print(len(df))gen_df = df[['ID20','STATE','points']]gen_df.to_csv('data/total_population_gen_df.csv')len(gen_df) | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/add_race_net_county_to_population.ipynb | import cudf, cupy as cp
import pandas as pd,numpy as np
import random, pickle
pd.set_option('display.max_colwidth', 100000)df = pd.read_parquet('data/total_parts_combined.parquet').reset_index(drop=True)
# df = cudf.from_pandas(df)adf = pd.read_parquet('data/total_attr_gen_df.parquet')
adf.head()adf['points'] = adf[['P20','eq_P10','block_net']].apply(lambda row: row[0] if row[-1] >=0 else row[0] + row[1],axis=1)
adf.head()adf.points.sum()full_2020_path='data/nhgis0007_csv/nhgis0007_ds248_2020_block.csv'
races = cudf.read_csv(full_2020_path,usecols=['GEOCODE','U7B001','U7B003','U7B004','U7B005','U7B006','U7B007','U7B008','U7B009'],encoding='unicode_escape',dtype={'GEOCODE':'int64','U7B003':'int32','U7B004':'int32','U7B005':'int32','U7B006':'int32','U7B007':'int32','U7B008':'int32','U7B009':'int32'})
races.rename(columns={'GEOCODE':'ID20'},inplace=True)races['block_net'] = adf['block_net']
races['P10'] = adf['eq_P10']
races['points'] = adf['points']craces = races.to_pandas()craces.rename(columns={'U7B001':'P20','U7B003':'R1','U7B004':'R2','U7B005':'R3','U7B006':'R4','U7B007':'R5','U7B008':'R6','U7B009':'R7'},inplace=True)
craces.head(6)craces = craces[(craces.P20!=0) | (craces.P10!=0)]craces['P20'].sum() + craces[craces.block_net==-1]['P10'].sum()craces['P10'] = craces['P10'].astype('int32')craces['R1'] = craces['R1'].apply(lambda x : [1]*x)
craces['R2'] = craces['R2'].apply(lambda x : [2]*x)
craces['R3'] = craces['R3'].apply(lambda x : [3]*x)
craces['R4'] = craces['R4'].apply(lambda x : [4]*x)
craces['R5'] = craces['R5'].apply(lambda x : [5]*x)
craces['R6'] = craces['R6'].apply(lambda x : [6]*x)
craces['R7'] = craces['R7'].apply(lambda x : [7]*x)
craces['P10_list'] = craces[['P10','block_net']].apply(lambda row : [0]*row[0] if row[1] <0 else [],axis=1)craces['pop'] = craces['R1']+craces['R2']+craces['R3']+craces['R4']+craces['R5']+craces['R6']+craces['R7'] + craces['P10_list']craces = craces[['ID20','P20','P10','block_net','pop','points']]craces = craces.reset_index(drop=True)craces.iloc[[185909]]pickle.dump(craces,open('craces_list3.pkl','wb'))craces = pickle.load(open('craces_list3.pkl','rb'))
craces# # craces = races.to_pandas()
# # craces.rename(columns={'U7B001':'P20','U7B003':'R1','U7B004':'R2','U7B005':'R3','U7B006':'R4','U7B007':'R5','U7B008':'R6','U7B009':'R7'},inplace=True)
# craces = craces[['ID20','P20','P10','block_net','points']]
# # del(races)
# craces.head()# craces = craces[(craces.P20!=0) | (craces.P10!=0) ]craces.P20.sum() + craces[(craces.block_net==-1)].P10.sum()# craces[['P20','P10','block_net','points']] = craces[['P20','P10','block_net','points']].astype('int32')craces['net_list'] = craces[['P20','P10','block_net']].apply(lambda row: [1]*(row[0]-row[1]) + [0]*row[1] if row[2] >=0 else [0]*row[0] + [-1]*(row[1]-row[0]) + [-99]*row[0],axis=1) # in -> p20-p10; stationary -> p10; | stationary-> p20 out -> p10-p20 extra -> p20 craces# pickle.dump(craces,open('race_and_net_list.pkl','wb'))craces = pickle.load(open('race_and_net_list.pkl','rb'))# graces = cudf.from_pandas(craces['pop'])
# gnet = cudf.from_pandas(craces['net_list'])edf = pd.DataFrame()# edf['races'] = graces.explode('pop').reset_index(drop=True)['pop']
edf['races'] = craces['pop'].explode().reset_index(drop=True)
edf['net_list'] = craces['net_list'].explode().reset_index(drop=True)edf.rename(columns={'net_list':'net'},inplace=True)# pickle.dump(edf,open('race_and_net_exploded.pkl','wb'))edf = pickle.load(open('race_and_net_exploded.pkl','rb'))county_ser = pickle.load(open('county_list.pkl','rb'))
county_ser = county_ser.to_pandas()edf['county'] = county_serdf[['race','net','county']] = edf# df.to_parquet('data/total_population_dataset.parquet')df = pd.read_parquet('data/total_population_dataset.parquet')df = df[df.net!=-99]df.to_parquet('data/total_population_dataset.parquet')small = df.sample(1000000)small.to_parquet('data/total_population_dataset_sm.parquet')len(net_list)pickle.dump(net_list,open('data/net_exploded3.pkl','wb'))craces = pickle.load(open('craces_list3.pkl','rb'))graces = cudf.from_pandas(craces[['ID20','pop']])temp =graces.explode('pop')temp = temp.reset_index(drop=True)['pop']ctemp = temp.to_pandas()ctemp[ctemp==-1]# ctemp.reset_index(drop=True).to_parquet('data/total_races.parquet')pickle.dump(ctemp,open('data/races_exploded3.pkl','wb'))races = pickle.load(open('data/races_exploded3.pkl','rb'))df['races'] = racesdf = df.drop('ID20',axis=1) # change if you need idcounty_ser = pickle.load(open('county_list.pkl','rb'))df = df.reset_index(drop=True)
df.head()df['county'] = county_ser.to_pandas()df.to_parquet('data/population_county_races3.parquet')attr = pd.read_parquet('data/population_county_races3.parquet')
net = pickle.load(open('data/net_exploded3.pkl','rb'))attr = attr.reset_index(drop=True)
net = net.reset_index(drop=True)attr['net'] = net.to_pandas()# attr = pd.read_parquet('data/total_population_dataset3.parquet')# attr.to_parquet('data/total_population_dataset3.parquet') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/SeparateTotalDatasetsByState.py | import cudf
import cuspatial
import geopandas as gpd
import os
from shapely.geometry import Polygon, MultiPolygon
DATA_PATH = "../data"
DATA_PATH_STATE = f"{DATA_PATH}/state-wise-population"
# create DATA_PATH if it does not exist
if not os.path.exists(DATA_PATH_STATE):
os.makedirs(DATA_PATH_STATE)
# Read the total population dataset as a cudf dataframe from the parquet file
df = cudf.read_parquet(f"{DATA_PATH}/total_population_dataset.parquet")
# Read the shapefile as a cuspatial dataframe and get the state names and geometries
# downloaded from https://hub.arcgis.com/datasets/1b02c87f62d24508970dc1a6df80c98e/explore
shapefile_path = f"{DATA_PATH}/States_shapefile/States_shapefile.shp"
states_data = gpd.read_file(shapefile_path)[
["State_Code", "State_Name", "geometry"]
].to_crs(3857)
print("Number of states to process: ", len(states_data))
print("Number of points in total population dataset: ", len(df))
print("Processing states with Polygon geometries...")
processed_states = 0
# Loop through the states and get the points in each state and save as a separate dataframe
# process all Polygon geometries in the shapefile
for index, row in states_data.iterrows():
if isinstance(row["geometry"], MultiPolygon):
# skip MultiPolygon geometries
continue
state_name = row["State_Name"]
processed_states += 1
print(
"Processing state: ",
state_name,
" (",
processed_states,
"/",
len(states_data),
")",
)
if os.path.exists(f"{DATA_PATH_STATE}/{state_name}.parquet"):
print("State already processed. Skipping...")
continue
# process all MultiPolygon geometries in the shapefile
# Use cuspatial point_in_polygon to get the points in the state from the total population dataset
state_geometry = cuspatial.GeoSeries(
gpd.GeoSeries(row["geometry"]), index=["selection"]
)
# Loop through the total population dataset in batches of 50 million points to avoid OOM issues
batch_size = 50_000_000
points_in_state = cudf.DataFrame({"selection": []})
for i in range(0, len(df), batch_size):
# get the batch of points
batch = df[i : i + batch_size][["easting", "northing"]]
# convert to GeoSeries
points = cuspatial.GeoSeries.from_points_xy(
batch.interleave_columns().astype("float64")
)
# get the points in the state from the batch
points_in_state_current_batch = cuspatial.point_in_polygon(
points, state_geometry
)
# append the points in the state from the batch to the points_in_state dataframe
points_in_state = cudf.concat([points_in_state, points_in_state_current_batch])
# free up memory
del batch
print(
f"Number of points in {state_name}: ",
df[points_in_state["selection"]].shape[0],
)
# save the points in the state as a separate dataframe
df[points_in_state["selection"]].to_parquet(
f"{DATA_PATH_STATE}/{state_name}.parquet"
)
print("Processing states with MultiPolygon geometries...")
# process all MultiPolygon geometries in the shapefile
for index, row in states_data.iterrows():
if isinstance(row["geometry"], Polygon):
# skip Polygon geometries
continue
state_name = row["State_Name"]
processed_states += 1
print(
"Processing state: ",
state_name,
" (",
processed_states,
"/",
len(states_data),
")",
)
if os.path.exists(f"{DATA_PATH_STATE}/{state_name}.parquet"):
print("State already processed. Skipping...")
continue
# process all MultiPolygon geometries in the shapefile
points_in_state = None
for polygon in list(row["geometry"].geoms):
# process each polygon in the MultiPolygon
state_geometry = cuspatial.GeoSeries(
gpd.GeoSeries(polygon), index=["selection"]
)
# Loop through the total population dataset in batches of 50 million points to avoid OOM issues
batch_size = 50_000_000
points_in_state_current_polygon = cudf.DataFrame({"selection": []})
for i in range(0, len(df), batch_size):
# get the batch of points
batch = df[i : i + batch_size][["easting", "northing"]]
# convert to GeoSeries
points = cuspatial.GeoSeries.from_points_xy(
batch.interleave_columns().astype("float64")
)
# get the points in the state from the batch
points_in_state_current_batch = cuspatial.point_in_polygon(
points, state_geometry
)
# append the points in the state from the batch to the points_in_state_current_polygon dataframe
points_in_state_current_polygon = cudf.concat(
[points_in_state_current_polygon, points_in_state_current_batch]
)
# free up memory
del batch
points_in_state = (
points_in_state_current_polygon
if points_in_state is None
else points_in_state | points_in_state_current_polygon
)
print(
f"Number of points in {state_name}: ",
df[points_in_state["selection"]].shape[0],
)
# save the points in the state as a separate dataframe
df[points_in_state["selection"]].to_parquet(
f"{DATA_PATH_STATE}/{state_name}.parquet"
)
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_total_population/.ipynb_checkpoints/SeparateTotalDatasetsByState-checkpoint.ipynb | import cudf
import pickle# Load the dataset
df = cudf.read_parquet('../data/total_population_dataset.parquet')
df.head()# Load the state to county mapping
id2county = pickle.load(open('../id2county.pkl','rb'))
df_counties = cudf.DataFrame(dict(idx=list(id2county.keys()), county=list(id2county.values())))
# Lowercase the county names for easier merging
df_counties['county_lower'] = df_counties.county.str.lower()
df_counties.head()# Dataset downloaded from https://public.opendatasoft.com/explore/dataset/georef-united-states-of-america-county/export/?disjunctive.ste_code&disjunctive.ste_name&disjunctive.coty_code&disjunctive.coty_name
county_state_df = cudf.read_csv('../data/us-counties1.csv', delimiter=";")[['Official Name County', 'Type', 'Official Name State']].dropna()
county_state_df.columns = ['county', 'type', 'state']
county_state_df.head()# Add the type to the county name
county_state_df['county'] = county_state_df.apply(lambda row: row['county'] + ' ' + row['type'], axis=1)
# Remove non-ascii characters and abbreviations to match the other id2county mapping dataset
county_state_df['county'] = county_state_df.county.to_pandas().replace({r'[^\x00-\x7F]+': '', r'([A-Z][a-z]+)([A-Z]+)': r'\1'}, regex=True)
# Lowercase the county names for easier merging
county_state_df['county_lower'] = county_state_df['county'].str.lower()# Merge the datasets and drop duplicates to get the state for each county in the total population dataset
df_map_county_to_states = df_counties.merge(county_state_df, on='county_lower', how='left', suffixes=['', '_y']).drop_duplicates(subset=['county_lower'])[['idx', 'county', 'state' ]]# Fill in the states for unavailable states manually by looking at the counties
# Carson City, Nevada
# District of Columbia, Washington DC
# Remaining, Connecticut
df_map_county_to_states.loc[df_map_county_to_states.county == 'Carson City', 'state'] = 'Nevada'
df_map_county_to_states.loc[df_map_county_to_states.county == 'District of Columbia', 'state'] = 'Nevada'
df_map_county_to_states.loc[df_map_county_to_states.isna().any(axis=1), 'state'] = 'Connecticut'# Save the mapping
df_map_county_to_states.to_parquet('../data/county_to_state_mapping.parquet')df_map_county_to_states | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/gen_table_with_race_migration.ipynb | import pandas as pd
import geopandas as gpd
import ast,os,random
pd.set_option('display.float_format','{:.1f}'.format)
import warnings
warnings.filterwarnings('ignore')
import cudf, cupy as cp
import numpy as np
import time
# pd.set_option('display.max_colwidth', -1)full_2020_path='data/nhgis0007_csv/nhgis0007_ds248_2020_block.csv'
full_2010_path='data/race2010/nhgis0004_csv/nhgis0004_ds172_2010_block.csv'race_10 = cudf.read_csv(full_2010_path,usecols=['GISJOIN','STATEA','H7X001','H7X002','H7X003','H7X004','H7X005','H7X006','H7X007','H7X008'])
race_10.head()race_10.GISJOIN = race_10.GISJOIN.str.replace('G', '')
race_10.rename(columns={'H7X001':'P10','STATEA':'STATE','H7X002':'R101','H7X003':'R102','H7X004':'R103','H7X005':'R104','H7X006':'R105','H7X007':'R106','H7X008':'R107'},inplace=True)
race_10['ID10'] = (race_10.GISJOIN.str.slice(start=0,stop=2) + race_10.GISJOIN.str.slice(start=3,stop=6) + race_10.GISJOIN.str.slice(start=7)).astype('int64')
race_10.head()race_20 = cudf.read_csv(full_2020_path,usecols=['GISJOIN','GEOCODE','STATEA','U7B001','U7B003','U7B004','U7B005','U7B006','U7B007','U7B008','U7B009'],dtype={'U7B001':'float32','U7B003':'float32','U7B004':'float32','U7B005':'float32','U7B006':'float32','U7B007':'float32','U7B008':'float32','U7B009':'float32'})
race_20.head()# df = small.copy()
race_20.rename(columns={'U7B001':'P20','STATEA':'STATE','GEOCODE':'ID20','U7B003':'R201','U7B004':'R202','U7B005':'R203','U7B006':'R204','U7B007':'R205','U7B008':'R206','U7B009':'R207'},inplace=True)
race_20.GISJOIN = race_20.GISJOIN.str.replace('G', '').astype('int64')
race_20.head()states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
54:"WV",55:"WI",56:"WY",72:"PR"}
# states = {11:"DC",10:"DE"}
def concat_states_mapper(state_key_list):
concat_mapper = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/block_rel_files/tab2010_tab2020_st%s_%s.csv'%(i_str,states[i].lower())
if os.path.isfile(path):
mapper = cudf.read_csv(path,delimiter='|')
concat_mapper = cudf.concat([concat_mapper,mapper])
else:
print("mapper file does not exist")
continue
return concat_mappermapper_df = concat_states_mapper(states.keys())
mapper_df.rename(columns={'STATE_2010':'STATE_2010'},inplace=True)
mapper_df.head(2)mapper_df['ID10'] = (mapper_df.STATE_2010.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2010.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2010.astype('str').str.rjust(6,'0') + mapper_df.BLK_2010.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df['ID20'] = (mapper_df.STATE_2020.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2020.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2020.astype('str').str.rjust(6,'0') + mapper_df.BLK_2020.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df = mapper_df[['ID10','ID20','STATE_2020']].reset_index()
mapper_df.head()freq_df = mapper_df.ID10.value_counts().reset_index().sort_values('index')
freq_df.rename(columns={'ID10':'freq'},inplace=True)
freq_df.rename(columns={'index':'ID10'},inplace=True)
freq_df = freq_df.reset_index(drop=True)
race_10.P10 = race_10.P10.astype('float32')
freq_df = cudf.merge(freq_df,race_10[['ID10','P10','R101','R102','R103','R104','R105','R106','R107']],on='ID10',how='right').sort_values('ID10')
freq_df['P10_weights'] = freq_df['P10'].divide(freq_df['freq'])
freq_df['R101_weights'] = freq_df['R101'].divide(freq_df['freq'])
freq_df['R102_weights'] = freq_df['R102'].divide(freq_df['freq'])
freq_df['R103_weights'] = freq_df['R103'].divide(freq_df['freq'])
freq_df['R104_weights'] = freq_df['R104'].divide(freq_df['freq'])
freq_df['R105_weights'] = freq_df['R105'].divide(freq_df['freq'])
freq_df['R106_weights'] = freq_df['R106'].divide(freq_df['freq'])
freq_df['R107_weights'] = freq_df['R107'].divide(freq_df['freq'])
freq_df.head()weighted_mapper = cudf.merge(mapper_df,freq_df[['ID10','P10_weights','R101_weights','R102_weights','R103_weights','R104_weights','R105_weights','R106_weights','R107_weights']],on='ID10',how='left').sort_values('ID20').reset_index(drop=True)eq_10 = weighted_mapper.groupby('ID20')[['ID10','P10_weights','R101_weights','R102_weights','R103_weights','R104_weights','R105_weights','R106_weights','R107_weights']].sum().reset_index().sort_values('ID20').reset_index(drop=True)
del(weighted_mapper)
eq_10.rename(columns={'P10_weights':'eq_P10','R101_weights':'eq_R101','R102_weights':'eq_R102','R103_weights':'eq_R103','R104_weights':'eq_R104','R105_weights':'eq_R105','R106_weights':'eq_R106','R107_weights':'eq_R107'},inplace=True)
eq_10.head()race_20.head(2)print(len(eq_10))
print(len(race_20))race_df = eq_10[['ID20']].copy()race_df['P_diff'] = race_20['P20'].sub(eq_10['eq_P10'])
race_df['R1_diff'] = race_20['R201'].sub(eq_10['eq_R101'])
race_df['R2_diff'] = race_20['R202'].sub(eq_10['eq_R102'])
race_df['R3_diff'] = race_20['R203'].sub(eq_10['eq_R103'])
race_df['R4_diff'] = race_20['R204'].sub(eq_10['eq_R104'])
race_df['R5_diff'] = race_20['R205'].sub(eq_10['eq_R105'])
race_df['R6_diff'] = race_20['R206'].sub(eq_10['eq_R106'])
race_df['R7_diff'] = race_20['R207'].sub(eq_10['eq_R107'])
race_df.head()df = cudf.read_csv('data/mapped_blocks_full.csv',dtype={'GISJOIN':'int64','ID20':'int64','STATE':'int32','COUNTY':'str','P20':'int32','P10_new':'int32'}).drop('Unnamed: 0',axis=1)
df['P_delta']=df['P20'] - df['eq_P10']
# df = df[df.number!=0]
df.head()print(len(race_df))
print(len(df))final_df = cudf.merge(df[['ID20','STATE','COUNTY','P_delta']],race_df.drop(columns=['P_diff']),on='ID20').sort_values('ID20')
del(df,race_10,race_20,race_df,eq_10,freq_df)
final_df.head()final_df['P_delta'] = final_df.P_delta.round().astype('int32')
final_df['number'] = final_df.P_delta.round().abs().astype('int32')
final_df['R1_diff'] = final_df.R1_diff.round().astype('int32')
final_df['R2_diff'] = final_df.R2_diff.round().astype('int32')
final_df['R3_diff'] = final_df.R3_diff.round().astype('int32')
final_df['R4_diff'] = final_df.R4_diff.round().astype('int32')
final_df['R5_diff'] = final_df.R5_diff.round().astype('int32')
final_df['R6_diff'] = final_df.R6_diff.round().astype('int32')
final_df['R7_diff'] = final_df.R7_diff.round().astype('int32')
final_df.head()final_df.number.sum()print(len(final_df))final_df['error'] = final_df['P_delta'] - final_df[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].sum(axis=1)
final_df['R7_diff'] = final_df['R7_diff'] + final_df['error'] # fix error by adjusting R7
print(len(final_df[final_df['P_delta']!= final_df[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].sum(axis=1)]))final_df.iloc[:,4:-2].abs().sum().sum()final_df.drop(columns=['error'],inplace=True)
final_df.head(3)final_df.to_csv('data/race_migration_diff.csv')races = cudf.read_csv('data/race_migration_diff.csv')
races.head()races['points']= races[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].abs().sum(axis=1)len(races)races = races[races['points']!=0]
print(len(races))
races.head()races.drop(columns=['Unnamed: 0'],inplace=True)races.head()races.to_csv('data/mapped_data_with_race.csv') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/assign_race.ipynb | import cudf, cupy as cp
import os,random
import pandas as pd, numpy as npraces = cudf.read_csv('data/mapped_data_with_race.csv',usecols=['ID20','P_delta','R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff'])
races.head()races = races.to_pandas()races['R1_diff'] =races['R1_diff'].apply(lambda x : [1]*abs(x) if x>0 else ([-1]*abs(x) if x<0 else []) )
races['R2_diff'] =races['R2_diff'].apply(lambda x : [2]*abs(x) if x>0 else ([-2]*abs(x) if x<0 else []) )
races['R3_diff'] =races['R3_diff'].apply(lambda x : [3]*abs(x) if x>0 else ([-3]*abs(x) if x<0 else []) )
races['R4_diff'] =races['R4_diff'].apply(lambda x : [4]*abs(x) if x>0 else ([-4]*abs(x) if x<0 else []) )
races['R5_diff'] =races['R5_diff'].apply(lambda x : [5]*abs(x) if x>0 else ([-5]*abs(x) if x<0 else []) )
races['R6_diff'] =races['R6_diff'].apply(lambda x : [6]*abs(x) if x>0 else ([-6]*abs(x) if x<0 else []) )
races['R7_diff'] =races['R7_diff'].apply(lambda x : [7]*abs(x) if x>0 else ([-7]*abs(x) if x<0 else []) )
races['pop'] = races['R1_diff']+races['R2_diff']+races['R3_diff']+races['R4_diff']+races['R5_diff']+races['R6_diff']+races['R7_diff']
races['pop'] = races['pop'].apply(lambda x: random.sample(x,len(x))) # shuffle races
races.head()gpu_races = cudf.from_pandas(races[['ID20','pop']])
del(races)gpu_races = gpu_races.explode('pop').reset_index(drop=True)
gpu_races.head()# gpu_races.to_pandas().to_csv('data/full_races_assigned.csv')races = pd.read_csv('data/full_races_assigned.csv').drop('Unnamed: 0',axis=1)
races = cudf.from_pandas(races)
races.head()population = pd.read_csv('data/final_data_with_race.csv').drop('Unnamed: 0',axis=1)
population = cudf.from_pandas(population)
population.head()population_with_race = cudf.concat([population,races['pop']],axis=1)
population_with_race.head()temp = population_with_race.to_pandas()temp.to_csv('data/population_race_concatenated.csv')pop = dask_cudf.read_csv('data/population_race_concatenated.csv',usecols=['ID20','x','y','pop'],dtype={'ID20':'int64','x':'float32','y':'float32','pop':'int32'})
# pop = cudf.from_pandas(pop)
pop.head()df = pd.read_csv('data/blocks_with_attr.csv',encoding='unicode_escape',usecols=['ID20','STATE','COUNTY','P_delta'],dtype={'ID20':'int64','STATE':'int32','COUNTY':'str','P_delta':'int32'})
# df = cudf.from_pandas(df)
df.head()df.COUNTY.replace({r'[^\x00-\x7F]+':''},regex=True,inplace=True)
df.COUNTY.replace({r'([A-Z][a-z]+)([A-Z]+)':r'\1'},regex=True,inplace=True)df = cudf.from_pandas(df)df = dask_cudf.from_cudf(df,npartitions=33)len(df)# Split dataset to manage OOM
# concat_data1 = pd.DataFrame()
# concat_data2 = pd.DataFrame()
# concat_data3 = pd.DataFrame()
# concat_data4 = pd.DataFrame()
# concat_data5 = pd.DataFrame()
# concat_data12 = None
# concat_data34 = None
# concat_data12345 = None
# def prepare_final_data(i,pop,df):
# global concat_data1
# global concat_data2
# global concat_data3
# global concat_data4
# global concat_data5
# global concat_data12
# global concat_data34
# global concat_data12345
# pop = cudf.from_pandas(pop)
# df = cudf.from_pandas(df)
# merged_data = pop.merge(df,on='ID20',how='left').sort_values('ID20')
# # print(merged_data.head())
# del(pop,df)
# if i <= 12:
# concat_data1 = pd.concat([concat_data1,merged_data.to_pandas()])
# elif i <= 24:
# concat_data2 = pd.concat([concat_data2,merged_data.to_pandas()])
# if i== 24:
# concat_data12 = pd.concat([concat_data1,concat_data2])
# concat_data12.to_csv('data/concat_data12.csv')
# del(concat_data1,concat_data2)
# elif i <= 36:
# concat_data3 = pd.concat([concat_data3,merged_data.to_pandas()])
# elif i <= 47:
# concat_data4 = pd.concat([concat_data4,merged_data.to_pandas()])
# if i == 47:
# concat_data34 = pd.concat([concat_data3,concat_data4])
# concat_data34.to_csv('data/concat_data34.csv')
# del(concat_data3,concat_data4)
# else:
# concat_data5 = pd.concat([concat_data5,merged_data.to_pandas()])
# if i == 72:
# concat_data5.to_csv('data/concat_data5.csv')
# concat12345 = pd.concat([concat_data34,concat_data5])
# del(concat_data12,concat_data34,concat_data5)
# states = {25:"MA",26:"MI",27:"MN",
# 28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
# 40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
# 54:"WV",55:"WI",56:"WY",72:"PR"}
# for i in states.keys():
# print(i)
# l1 = int(str(i)+'0'*13)
# l2 = int(str(i+1)+'0'*13)
# pop1 = pop[(pop.ID20>=l1) & (pop.ID20<=l2)]
# df1 = df[df.STATE==i]
# prepare_final_data(i,pop1,df1)dataset = pop.merge(df,on='ID20',how='left')
dataset.head()dataset['P_net'] = dataset['P_delta'].apply(lambda x: -1 if x < 0 else ( 1 if x>0 else 0))
dataset = dataset.reset_index(drop=True)
dataset = dataset.rename(columns ={'pop':'race','ID20':'blockid','STATE':'state','P_delta':'block_diff','COUNTY':'county','P_net':'block_net'})print(len(dataset))
dataset.head()# dataset.to_parquet('data/census_migration_dataset.parquet') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/compute_race.ipynb | import pandas as pd
import geopandas as gpd
import ast,os,random
pd.set_option('display.float_format','{:.1f}'.format)
import warnings
warnings.filterwarnings('ignore')
import cudf, cupy as cp
import numpy as np
import time
# pd.set_option('display.max_colwidth', -1)full_2020_path='data/nhgis0007_csv/nhgis0007_ds248_2020_block.csv'
full_2010_path='data/race2010/nhgis0004_csv/nhgis0004_ds172_2010_block.csv'race_10 = cudf.read_csv(full_2010_path,usecols=['GISJOIN','STATEA','H7X001','H7X002','H7X003','H7X004','H7X005','H7X006','H7X007','H7X008'])
race_10.head()race_10.GISJOIN = race_10.GISJOIN.str.replace('G', '')
race_10.rename(columns={'H7X001':'P10','STATEA':'STATE','H7X002':'R101','H7X003':'R102','H7X004':'R103','H7X005':'R104','H7X006':'R105','H7X007':'R106','H7X008':'R107'},inplace=True)
race_10['ID10'] = (race_10.GISJOIN.str.slice(start=0,stop=2) + race_10.GISJOIN.str.slice(start=3,stop=6) + race_10.GISJOIN.str.slice(start=7)).astype('int64')
race_10.head()race_20 = cudf.read_csv(full_2020_path,usecols=['GISJOIN','GEOCODE','STATEA','U7B001','U7B003','U7B004','U7B005','U7B006','U7B007','U7B008','U7B009'],dtype={'U7B001':'float32','U7B003':'float32','U7B004':'float32','U7B005':'float32','U7B006':'float32','U7B007':'float32','U7B008':'float32','U7B009':'float32'})
race_20.head()# df = small.copy()
race_20.rename(columns={'U7B001':'P20','STATEA':'STATE','GEOCODE':'ID20','U7B003':'R201','U7B004':'R202','U7B005':'R203','U7B006':'R204','U7B007':'R205','U7B008':'R206','U7B009':'R207'},inplace=True)
race_20.GISJOIN = race_20.GISJOIN.str.replace('G', '').astype('int64')
race_20.head()states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
54:"WV",55:"WI",56:"WY",72:"PR"}
# states = {11:"DC",10:"DE"}
def concat_states_mapper(state_key_list):
concat_mapper = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/block_rel_files/tab2010_tab2020_st%s_%s.csv'%(i_str,states[i].lower())
if os.path.isfile(path):
mapper = cudf.read_csv(path,delimiter='|')
concat_mapper = cudf.concat([concat_mapper,mapper])
else:
print("mapper file does not exist")
continue
return concat_mappermapper_df = concat_states_mapper(states.keys())
mapper_df.rename(columns={'STATE_2010':'STATE_2010'},inplace=True)
mapper_df.head(2)mapper_df['ID10'] = (mapper_df.STATE_2010.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2010.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2010.astype('str').str.rjust(6,'0') + mapper_df.BLK_2010.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df['ID20'] = (mapper_df.STATE_2020.astype('str').str.rjust(2,'0') + mapper_df.COUNTY_2020.astype('str').str.rjust(3,'0') + mapper_df.TRACT_2020.astype('str').str.rjust(6,'0') + mapper_df.BLK_2020.astype('str').str.rjust(4,'0')).astype('int64')
mapper_df = mapper_df[['ID10','ID20','STATE_2020']].reset_index()
mapper_df.head()freq_df = mapper_df.ID10.value_counts().reset_index().sort_values('index')
freq_df.rename(columns={'ID10':'freq'},inplace=True)
freq_df.rename(columns={'index':'ID10'},inplace=True)
freq_df = freq_df.reset_index(drop=True)
race_10.P10 = race_10.P10.astype('float32')
freq_df = cudf.merge(freq_df,race_10[['ID10','P10','R101','R102','R103','R104','R105','R106','R107']],on='ID10',how='right').sort_values('ID10')
freq_df['P10_weights'] = freq_df['P10'].divide(freq_df['freq'])
freq_df['R101_weights'] = freq_df['R101'].divide(freq_df['freq'])
freq_df['R102_weights'] = freq_df['R102'].divide(freq_df['freq'])
freq_df['R103_weights'] = freq_df['R103'].divide(freq_df['freq'])
freq_df['R104_weights'] = freq_df['R104'].divide(freq_df['freq'])
freq_df['R105_weights'] = freq_df['R105'].divide(freq_df['freq'])
freq_df['R106_weights'] = freq_df['R106'].divide(freq_df['freq'])
freq_df['R107_weights'] = freq_df['R107'].divide(freq_df['freq'])
freq_df.head()weighted_mapper = cudf.merge(mapper_df,freq_df[['ID10','P10_weights','R101_weights','R102_weights','R103_weights','R104_weights','R105_weights','R106_weights','R107_weights']],on='ID10',how='left').sort_values('ID20').reset_index(drop=True)eq_10 = weighted_mapper.groupby('ID20')[['ID10','P10_weights','R101_weights','R102_weights','R103_weights','R104_weights','R105_weights','R106_weights','R107_weights']].sum().reset_index().sort_values('ID20').reset_index(drop=True)
del(weighted_mapper)
eq_10.rename(columns={'P10_weights':'eq_P10','R101_weights':'eq_R101','R102_weights':'eq_R102','R103_weights':'eq_R103','R104_weights':'eq_R104','R105_weights':'eq_R105','R106_weights':'eq_R106','R107_weights':'eq_R107'},inplace=True)
eq_10.head()race_20.head(2)print(len(eq_10))
print(len(race_20))race_df = eq_10[['ID20']].copy()race_df['P_diff'] = race_20['P20'].sub(eq_10['eq_P10'])
race_df['R1_diff'] = race_20['R201'].sub(eq_10['eq_R101'])
race_df['R2_diff'] = race_20['R202'].sub(eq_10['eq_R102'])
race_df['R3_diff'] = race_20['R203'].sub(eq_10['eq_R103'])
race_df['R4_diff'] = race_20['R204'].sub(eq_10['eq_R104'])
race_df['R5_diff'] = race_20['R205'].sub(eq_10['eq_R105'])
race_df['R6_diff'] = race_20['R206'].sub(eq_10['eq_R106'])
race_df['R7_diff'] = race_20['R207'].sub(eq_10['eq_R107'])
race_df.head()df = cudf.read_csv('data/mapped_data_full.csv',dtype={'GISJOIN':'int64','ID20':'int64','STATE':'int32','COUNTY':'str','P20':'int32','P10_new':'int32'}).drop('Unnamed: 0',axis=1)
df['P_delta']=df['P20'] - df['eq_P10']
# df = df[df.number!=0]
df.head()print(len(race_df))
print(len(df))final_df = cudf.merge(df[['ID20','STATE','COUNTY','P_delta']],race_df.drop(columns=['P_diff']),on='ID20').sort_values('ID20')
del(df,race_10,race_20,race_df,eq_10,freq_df)
final_df.head()final_df['P_delta'] = final_df.P_delta.round().astype('int32')
final_df['number'] = final_df.P_delta.round().abs().astype('int32')
final_df['R1_diff'] = final_df.R1_diff.round().astype('int32')
final_df['R2_diff'] = final_df.R2_diff.round().astype('int32')
final_df['R3_diff'] = final_df.R3_diff.round().astype('int32')
final_df['R4_diff'] = final_df.R4_diff.round().astype('int32')
final_df['R5_diff'] = final_df.R5_diff.round().astype('int32')
final_df['R6_diff'] = final_df.R6_diff.round().astype('int32')
final_df['R7_diff'] = final_df.R7_diff.round().astype('int32')
final_df.head()print(len(final_df))final_df['error'] = final_df['P_delta'] - final_df[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].sum(axis=1)
final_df['R7_diff'] = final_df['R7_diff'] + final_df['error'] # fix error by adjusting R7
print(len(final_df[final_df['P_delta']!= final_df[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].sum(axis=1)]))final_df.drop(columns=['error'],inplace=True)
final_df.head(3)# final_df.to_csv('data/computed_race.csv')races = cudf.read_csv('data/computed_race.csv')
races.head()races['points']= races[['R1_diff','R2_diff','R3_diff','R4_diff','R5_diff','R6_diff','R7_diff']].abs().sum(axis=1)len(races)races = races[races['points']!=0]
print(len(races))
races.head()races.drop(columns=['Unnamed: 0'],inplace=True)races.head()# races.to_csv('data/mapped_data_with_race.csv') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/gen_race_mig_points.ipynb | import pandas as pd, numpy as np
import cudf, cupy as cp
import os# df = cudf.read_csv('census_full/HI_DE_DC_mapped_data.csv').drop('Unnamed: 0',axis=1)
df = cudf.read_csv('data/mapped_data_with_race.csv',usecols=['ID20','R'],dtype={'ID20':'int64','STATE':'int32','COUNTY':'str','points':'int32'})
df.head()len(df)df = df[df.STATE==12].reset_index(drop=True)len(df)df = df.iloc[:150000]df.head()df.tail()def random_points_in_polygon(number, polygon):
# print(polygon)
points_x = np.array([])
points_y = np.array([])
min_x, min_y, max_x, max_y = polygon.bounds
i= 0
while i < number:
point_x = random.uniform(min_x, max_x)
point_y = random.uniform(min_y, max_y)
if polygon.contains(Point(point_x, point_y)):
points_x = np.append(points_x, point_x)
points_y = np.append(points_y, point_y)
i += 1
return points_x, points_y # returns list of points(lat), list of points(long)
def generate_data(state, df_temp, gpdf):
t1 = datetime.datetime.now()
geoid_index_df = df_temp.index.to_numpy()
final_points_x = np.array([])
final_points_y = np.array([])
geoid = np.array([])
# f=0
for index, row in gpdf.iterrows():
# f+=1
points_x = np.array([])
points_y = np.array([])
geoid_temp = np.array([])
if row['GEOID20'] in geoid_index_df and df_temp.loc[row['GEOID20']]>0:
num_points = df_temp.loc[row['GEOID20']]
polygon = row['geometry']
if polygon is not None:
points_x, points_y = random_points_in_polygon(num_points, polygon)
# print(points_x,points_y)
geoid_temp = np.array([row['GEOID20']]*len(points_x))
geoid = np.append(geoid,geoid_temp)
final_points_x = np.append(final_points_x, points_x)
# print(final_points_x)
final_points_y = np.append(final_points_y, points_y)
print('Processing '+str(state)+' - Completed:', "{0:0.2f}".format((index/len(gpdf))*100), '%', end='')
print('', end='\r')
# if f==11:
# break
print('Processing for '+str(state)+' complete \n total time', datetime.datetime.now() - t1)
df_fin = cudf.DataFrame({'GEOID20': geoid,'x': final_points_x, 'y':final_points_y}) #,'COUNTY':county,'p_delta':p_delta,'p_net':p_net})
df_fin.GEOID20 = df_fin.GEOID20.astype('int').astype('str')
df_fin.to_csv('data/migration_population_with_race/population_FL1_%s'%str(state)+'.csv', index=False)
def exec_data(state_key_list):
c=0
for i in state_key_list:
# print(i)
c+=1
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
# path = 'census_2020_data/nhgis0003_shape/nhgis0003_shapefile_tl2020_%s0_block_2020/%s_block_2020.shp'%(i_str,states[i])
path ='data/tl_shapefiles/tl_2021_%s_tabblock20.shp'%(i_str)
#print(path)
print("started reading shape file for state ", states[i])
if os.path.isfile(path):
gpdf = gpd.read_file(path)[['GEOID20', 'geometry']].sort_values('GEOID20').reset_index(drop=True)
gpdf.GEOID20 = gpdf.GEOID20.astype('int64')
gpdf = gpdf[gpdf.GEOID20<=120830010101023]
print("completed reading shape file for state ", states[i])
df_temp = df.query('STATE == @i')[['ID20', 'points']]
df_temp.index = df_temp.ID20
df_temp = df_temp['points']
# print(gpdf.head(3))
# print(df_temp)
print("starting to generate data for "+str(states[i])+"... ")
generate_data(states[i], df_temp, gpdf)
del(df_temp)
else:
print("shape file does not exist")
continue
# if c==2:
# break # states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
# 16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
# 28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
# 40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
# 54:"WV",55:"WI",56:"WY",72:"PR"}
states = {12:"FL"}
exec_data(states.keys())# part1 = cudf.read_csv('data/migration_population_with_race/population_FL1_FL.csv',dtype={'ID20':'int64','x':'float32','y':'float32'})
# part2 = cudf.read_csv('data/migration_population_with_race/population_FL2_FL.csv',dtype={'ID20':'int64','x':'float32','y':'float32'})
# part3 = cudf.read_csv('data/migration_population_with_race/population_FL3_FL.csv',dtype={'ID20':'int64','x':'float32','y':'float32'})
# # part4 = cudf.read_csv('data/migration_population_with_race/population_NY4_NY.csv',dtype={'ID20':'int64','x':'float32','y':'float32'})# fl_df = cudf.concat([part1,part2,part3])# df[df.STATE==12]['points'].sum()# fl_df.to_csv('data/migration_population_with_race/population_FL.csv')def merge_shape_and_states(state_key_list):
concat_states = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/migration_population_with_race//population_%s'%str(states[i])+'.csv'
if os.path.isfile(path):
temp = cudf.read_csv(path,dtype={'ID20':'int64','x':'float32','y':'float32'})# Load shape files
concat_states = cudf.concat([concat_states,temp])
else:
print(i,states[i])
print("shape file does not exist")
continue
return concat_statesstates = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
54:"WV",55:"WI",56:"WY",72:"PR"}indv_df = merge_shape_and_states(states.keys()).drop('Unnamed: 0',axis=1)
indv_df.rename(columns={'GEOID20':'ID20'},inplace=True)
indv_df.head()# cpu_df = indv_df.to_pandas()# cpu_df.to_csv('data/final_data_with_race.csv')len(cpu_df)dataset = indv_df.merge(df,on='ID20',how='left').sort_values('ID20')
dataset.head()dataset['P_net'] = dataset['P_delta'].apply(lambda x: -1 if x < 0 else ( 1 if x>0 else 0))cdf = dataset.to_pandas()
cdf.head()# cdf.to_csv('data/final_data_with_race.csv') | 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/README.md | # Net Migration dataset generation
## Order of execution
1. gen_table_with_race_migration
2. gen_race_mig_points
3. compute_race
4. assign_race
## Mappings:
### Block Net
<b>1</b>: Inward Migration</br>
<b>0</b>: Stationary</br>
<b>-1</b>: Outward Migration</br>
### Block diff
Integer
### Race
<b>0</b>: All</br>
<b>1</b>: White</br>
<b>2</b>: African American</br>
<b>3</b>: American Indian</br>
<b>4</b>: Asian alone</br>
<b>5</b>: Native Hawaiian</br>
<b>6</b>: Other Race alone</br>
<b>7</b>: Two or More</br>
### County
Mappings for counties can be found in `id2county.pkl` file from root directory.
### Final Dataset
You can download the final net miragtion dataset [here](https://data.rapids.ai/viz-data/net_migration_dataset.parquet)
| 0 |
rapidsai_public_repos/plotly-dash-rapids-census-demo | rapidsai_public_repos/plotly-dash-rapids-census-demo/data_prep_net_migration/gen_points_in_rectangle_fast_script.ipynb | import cudf, cupy
import pandas as pd, numpy as np
import geopandas as gpd
# from shapely.geometry import Point, Polygon
import os
import datetime
import pickledf = pd.read_csv('data/mapped_data_full.csv',encoding='unicode_escape',dtype={'GISJOIN':'int64','ID20':'int64','STATE':'int32','COUNTY':'str','P20':'int32','P10_new':'int32'}).drop('Unnamed: 0',axis=1)
df['P_delta']=df['P20'] - df['eq_P10']
df['P_net']= df['P_delta'].apply(lambda x : 1 if x>0 else 0)
df['number'] = df.P_delta.round().abs().astype('int32')
df.head()# df =df.to_pandas()def Random_Points_in_Bounds(row):
polygon = row.iloc[0]
number = row.iloc[1]
minx, miny, maxx, maxy = polygon.bounds
x = np.random.uniform( minx, maxx, number )
y = np.random.uniform( miny, maxy, number )
return [x, y]
def makeXYpair(row):
l1 = row[0]
l2 = row[1]
return list(map(lambda x, y:[x,y], l1, l2))
def exec_data(state_key_list):
c=0
for i in state_key_list:
c+=1
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path ='data/tl_shapefiles/tl_2021_%s_tabblock20.shp'%(i_str)
print("started reading shape file for state ", states[i])
if os.path.isfile(path):
gpdf = gpd.read_file(path)[['GEOID20', 'geometry']].sort_values('GEOID20').reset_index(drop=True)
gpdf.GEOID20 = gpdf.GEOID20.astype('int64')
print("completed reading shape file for state ", states[i])
df_temp = df.query('STATE == @i')[['ID20', 'number','COUNTY','P_delta','P_net']]
merged_df =pd.merge(gpdf,df_temp[['ID20','number']],left_on='GEOID20',right_on='ID20',how='inner')
merged_df = merged_df[merged_df.number!=0].reset_index(drop=True)
merged_df =merged_df.reset_index(drop=True).drop(columns=['GEOID20'])
print("starting to generate data for "+str(states[i])+"... ")
t1 = datetime.datetime.now()
population_df = pd.DataFrame(merged_df[['geometry','number']].apply(Random_Points_in_Bounds,axis=1),columns=['population'])
points_df = population_df['population'].apply(makeXYpair)
points_df = pd.DataFrame(points_df.explode()).reset_index()
pop_list =points_df['population'].to_list()
final_df =pd.DataFrame(pop_list,columns=['x','y']).reset_index(drop=True)
ids = merged_df.ID20.to_list()
number =merged_df.number.to_list()
rows = []
for id20, n in zip(ids,number):
rows.extend([id20]*n)
final_df['ID20'] = pd.Series(rows)
final_df = final_df.sort_values('ID20').reset_index(drop=True)
final_df = pd.merge(final_df,df_temp, on='ID20',how='left')
final_df.to_csv('data/migration_files1/migration_%s'%str(states[i])+'.csv', index=False)
print("Processing complete for", states[i])
print('Processing for '+str(states[i])+' complete \n total time', datetime.datetime.now() - t1)
del(df_temp)
else:
print("shape file does not exist")
continue# states = {1 :"AL",2 :"AK",4 :"AZ",5 :"AR",6 :"CA",8 :"CO",9 :"CT",10:"DE",11:"DC",12:"FL",13:"GA",15:"HI",
# 16:"ID",17:"IL",18:"IN",19:"IA",20:"KS",21:"KY",22:"LA",23:"ME",24:"MD",25:"MA",26:"MI",27:"MN",
# 28:"MS",29:"MO",30:"MT",31:"NE",32:"NV",33:"NH",34:"NJ",35:"NM",36:"NY",37:"NC",38:"ND",39:"OH",
# 40:"OK",41:"OR",42:"PA",44:"RI",45:"SC",46:"SD",47:"TN",48:"TX",49:"UT",50:"VT",51:"VA",53:"WA",
# 54:"WV",55:"WI",56:"WY",72:"PR"}
states= { 12:"FL",13:"GA",15:"HI",16:"ID",17:"IL",18:"IN",19:"IA",20:"KS}exec_data(states.keys())def merge_shape_and_states(state_key_list):
concat_states = cudf.DataFrame()
for i in state_key_list:
if i< 10:
i_str = '0'+str(i)
else:
i_str = str(i)
path = 'data/migration_files1/migration_%s'%str(states[i])+'.csv'
if os.path.isfile(path):
temp = cudf.read_csv(path,dtype={'ID20':'int64','x':'float32','y':'float32'})# Load shape files
concat_states = cudf.concat([concat_states,temp])
else:
print(path)
print("shape file does not exist")
continue
return concat_statesindv_df = merge_shape_and_states(states.keys())
indv_df.rename(columns={'GEOID20':'ID20'},inplace=True)
indv_df.head()pickle.dump(indv_df,open('fulldata_gpu_2','wb'))
# indv_df = pickle.load(open('fulldata_gpu','rb'))# indv_df = indv_df.to_pandas()indv_df = dask_cudf.from_cudf(indv_df, npartitions=2).persist()# dataset = pd.merge(indv_df,df,on='ID20',how='left')
dataset = indv_df.merge(df,on='ID20',how='left') # merge dask dfslen(dataset)del(indv_df)
del(df)dataset = dataset.sort_values('ID20')
dataset = dataset.drop(columns=['GISJOIN'])
dataset.head()from holoviews.element.tiles import CartoDark
import holoviews as hv
from holoviews.operation.datashader import datashade,rasterize,shade
from plotly.colors import sequential
hv.extension('plotly')dataset["easting"], dataset["northing"] = hv.Tiles.lon_lat_to_easting_northing(dataset["x"], dataset["y"])
dataset.head()dataset = hv.Dataset(dataset)mapbox_token = 'pk.eyJ1IjoibmlzaGFudGoiLCJhIjoiY2w1aXpwMXlkMDEyaDNjczBkZDVjY2l6dyJ9.7oLijsue-xOICmTqNInrBQ'
tiles= hv.Tiles().opts(mapboxstyle="dark", accesstoken=mapbox_token)
points = datashade(hv.Points(dataset, ["easting", "northing"]),cmap=sequential.Plasma)(tiles*points).opts(width=1800, height=500) | 0 |
rapidsai_public_repos | rapidsai_public_repos/scikit-learn-nv/pyproject.toml | [build-system]
requires = ["setuptools>=61.0"]
build-backend = "setuptools.build_meta"
[project]
name = "scikit-learn-nv"
version = "0.0.1"
authors = [
{ name="Tim Head", email="betatim@gmail.com" },
]
description = "A NVIDIA accelerated backend for scikit-learn"
readme = "README.md"
requires-python = ">=3.7"
dependencies = [
"scikit-learn"
]
classifiers = [
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
]
[project.urls]
"Homepage" = "https://github.com/rapidsai/scikit-learn-nv"
"Bug Tracker" = "https://github.com/rapidsai/scikit-learn-nv/issues"
[project.entry-points.sklearn_engines]
kmeans = "sklearn_nv.kmeans:KMeansEngine"
[tool.black]
target_version = ['py38', 'py39', 'py310']
preview = true
exclude = '''
/(
\.eggs # exclude a few common directories in the
| \.git # root of the project
| \.vscode
)/
''' | 0 |
rapidsai_public_repos | rapidsai_public_repos/scikit-learn-nv/README.md | # A GPU acceleration plugin for scikit-learn
> This is a proof-of-concept. Everything might change.
## Install
To install this plugin you need to install a in development version of scikit-learn as well
as this plugin, and its dependencies:
1. Create a new conda environment:
```commandline
conda create -n sklearn-nv -c conda-forge python=3.9 numpy scipy matplotlib cython compilers joblib threadpoolctl
```
2. Activate the environment:
```commandline
conda activate sklearn-nv
```
3. Install the nightly build of `pylibraft`:
```commandline
conda install -c conda-forge -c rapidsai-nightly -c nvidia --no-channel-priority pylibraft=23.04 cupy rmm
```
4. Checkout the development version of scikit-learn from the `feature/engine-api` branch
(see [`scikit-learn#25535`](https://github.com/scikit-learn/scikit-learn/pull/25535). Using [`gh`](https://cli.github.com/):
```commandline
gh repo clone scikit-learn/scikit-learn
(cd scikit-learn; gh pr checkout 25535)
```
5. Install the scikit-learn development version by running the following in the folder you checked the code out to:
```commandline
pip install --no-build-isolation -e .
```
6. Install this plugin by checking out this repository. Using [`gh`](https://cli.github.com/):
```commandline
gh repo clone rapidsai/scikit-learn-nv
pip install -e scikit-learn-nv
```
## Running
To try it out, enable the plugin using:
```python
import sklearn
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import cupy as cp
X, y_true = make_blobs(
n_samples=300, centers=4, n_features=3, cluster_std=0.60, random_state=10
)
with sklearn.config_context(engine_provider="sklearn_nv"):
km = KMeans(random_state=42)
km.fit(X)
y_pred = km.predict(X)
```
| 0 |
rapidsai_public_repos | rapidsai_public_repos/scikit-learn-nv/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/scikit-learn-nv | rapidsai_public_repos/scikit-learn-nv/tests/test_kmeans.py | import time
import numpy as np
import sklearn
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from sklearn_nv.kmeans import _is_same_clustering
def same_rows(a, b):
    """Determine if the rows are the same, up to a permutation.

    Returns True when `a` and `b` have identical shape and every row of
    `a` matches (via ``np.allclose``) at least one row of `b`, regardless
    of row order.
    """
    if a.shape != b.shape:
        return False
    # Each row of `a` must have an approximate match somewhere in `b`.
    return all(
        any(np.allclose(row_a, row_b) for row_b in b)
        for row_a in a
    )
def test_sklearn_equivalence():
    """The sklearn_nv engine must reproduce stock scikit-learn's KMeans."""
    n_clusters = 4
    X, _ = make_blobs(
        n_samples=30,
        centers=n_clusters,
        n_features=3,
        cluster_std=0.60,
        random_state=10,
    )
    kmeans_args = dict(n_clusters=n_clusters, random_state=42, n_init=1)

    # Reference run with the default implementation.
    reference = KMeans(**kmeans_args)
    reference.fit(X)
    labels_ref = reference.predict(X)

    # Accelerated run through the sklearn_nv engine provider.
    with sklearn.config_context(engine_provider="sklearn_nv"):
        accelerated = KMeans(**kmeans_args)
        accelerated.fit(X)
        labels_acc = accelerated.predict(X)

    assert same_rows(reference.cluster_centers_, accelerated.cluster_centers_)
    assert np.allclose(reference.inertia_, accelerated.inertia_)
    assert _is_same_clustering(labels_ref, labels_acc, n_clusters)
    assert reference.n_iter_ == accelerated.n_iter_
| 0 |
rapidsai_public_repos/scikit-learn-nv/src | rapidsai_public_repos/scikit-learn-nv/src/sklearn_nv/kmeans.py | import time
import numpy as np
import scipy.sparse as sp
import cupy as cp
from sklearn.utils import check_random_state, check_array
from sklearn.utils.validation import _check_sample_weight, _is_arraylike_not_scalar
from pylibraft.distance import fused_l2_nn_argmin
from pylibraft.cluster.kmeans import compute_new_centroids, init_plus_plus
def _get_namespace(X):
    """Get the namespace of an array.

    This will get the "Array API" namespace of the array, if it has one.
    CuPy and NumPy arrays are special cases where the respective module is
    returned, even if the arrays are not defined via the Array API.
    """
    # Special-case the two concrete array libraries used by this engine.
    if isinstance(X, cp.ndarray):
        return cp
    elif isinstance(X, np.ndarray):
        return np
    # Fall back to the standard Array API protocol.
    if hasattr(X, "__array_namespace__"):
        return X.__array_namespace__()
    # NOTE(review): implicitly returns None for inputs that match none of
    # the branches above -- callers assume one of them matched.
def pairwise_distances_argmin(X, Y, handle=None):
    """For each row of ``X``, find the index of the nearest row of ``Y``.

    Thin wrapper around pylibraft's fused L2 nearest-neighbor argmin.
    Returns an ``(n_samples, 1)`` int32 CuPy array of indices.
    """
    labels = cp.zeros((X.shape[0], 1), dtype=np.int32)
    # fused_l2_nn_argmin writes the argmin indices into `labels` in place.
    fused_l2_nn_argmin(X, Y, labels, handle=handle)
    return labels
def compute_centroids_(X, sample_weight, labels, centroids, new_centroids, handle=None):
    """Compute updated cluster centroids, writing the result into ``new_centroids``.

    Thin wrapper around pylibraft's ``compute_new_centroids``, which updates
    ``new_centroids`` in place from the points in ``X``, their current cluster
    assignment ``labels``, and the per-sample weights.
    """
    compute_new_centroids(
        X,
        centroids,
        labels,
        new_centroids,
        sample_weights=sample_weight,
        handle=handle,
    )
def _tolerance(X, tol):
    """Scale the relative tolerance `tol` by the mean per-feature variance of X."""
    xp = _get_namespace(X)
    return tol * xp.mean(xp.var(X, axis=0))
def _is_same_clustering(labels1, labels2, n_clusters):
    """Check if two arrays of labels are the same up to a permutation of the labels"""
    xp = _get_namespace(labels1)
    # mapping[c] records which label in `labels2` cluster `c` of `labels1`
    # was first seen paired with; -1 means "not seen yet".
    mapping = xp.full(fill_value=-1, shape=(n_clusters,), dtype=xp.int32)
    for l1, l2 in zip(labels1, labels2):
        if mapping[l1] == -1:
            mapping[l1] = l2
        elif mapping[l1] != l2:
            return False
    return True
class KMeansEngine:  # (_kmeans.KMeansCythonEngine):
    """GPU KMeans engine backed by pylibraft and CuPy.

    Appears to implement the pluggable-engine API of scikit-learn's KMeans
    (see the commented-out base class) -- the host estimator delegates
    validation, initialization, the Lloyd iteration, and prediction to the
    methods below. Data is validated on CPU and then moved to the GPU as
    CuPy arrays.
    """

    def __init__(self, estimator):
        # Keep a reference to the host scikit-learn estimator so its
        # hyper-parameters (n_clusters, max_iter, tol, ...) can be read later.
        self.estimator = estimator

    def accepts(self, X, y=None, sample_weight=None):
        """Determine if input data and hyper-parameters are supported by
        this engine.

        Determine if this engine can handle the hyper-parameters of the
        estimator as well as the input data. If not, return `False`. This
        method is called during engine selection, where each enabled engine
        is tried in the user defined order.

        Should fail as quickly as possible.
        """
        # Only the default "k-means++" initialization is implemented here.
        if self.estimator.init != "k-means++":
            return False
        # Only Lloyd's algorithm is implemented (no "elkan").
        if self.estimator.algorithm != "lloyd":
            return False
        # Sparse input is not supported by the RAFT primitives used below.
        if sp.issparse(X):
            return False
        return True

    def prepare_fit(self, X, y=None, sample_weight=None):
        """Validate inputs, center the data, and move it to the GPU.

        Returns ``(X, y, sample_weight)`` where ``X`` and ``sample_weight``
        are CuPy arrays. Also stores ``X_mean``, ``random_state``, ``tol``,
        and ``init`` on ``self`` for use by later steps.
        """
        estimator = self.estimator
        X = estimator._validate_data(
            X,
            accept_sparse="csr",
            dtype=[np.float64, np.float32],
            order="C",
            copy=estimator.copy_x,
            accept_large_sparse=False,
        )
        # this sets estimator._algorithm implicitly
        # XXX: shall we expose this logic as part of the engine API?
        # or is the current API flexible enough?
        estimator._check_params_vs_input(X)
        # TODO: delegate rng and sample weight checks to engine
        random_state = check_random_state(estimator.random_state)
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)
        # Validate init array (only relevant when the user passed explicit
        # initial centers rather than an init strategy name).
        init = estimator.init
        init_is_array_like = _is_arraylike_not_scalar(init)
        if init_is_array_like:
            init = check_array(init, dtype=X.dtype, copy=True, order="C")
            estimator._validate_center_shape(X, init)
        # Subtract the mean of X for more accurate distance computations.
        # NOTE(review): `X_mean` is only assigned in this branch; `accepts`
        # rejects sparse input, so the dense branch is presumably always
        # taken -- confirm no sparse path reaches here.
        if not sp.issparse(X):
            xp = _get_namespace(X)
            X_mean = xp.mean(X, axis=0)
            # The copy was already done above
            X -= X_mean
            if init_is_array_like:
                init -= X_mean
        self.X_mean = X_mean
        self.random_state = random_state
        self.tol = _tolerance(X, estimator.tol)
        self.init = init
        # Used later to unshift X again; need to explicitly convert to a
        # cupy array
        self.X_mean = cp.asarray(self.X_mean)
        X_ = cp.asarray(X)
        sample_weight_ = cp.asarray(sample_weight)
        return X_, y, sample_weight_

    def unshift_centers(self, X, best_centers):
        """Undo the mean-centering applied in `prepare_fit`.

        Shifts the final centers back into the original data frame, and
        restores ``X`` in place when the estimator was told not to copy it.
        """
        if not sp.issparse(X):
            if not self.estimator.copy_x:
                X += self.X_mean
            best_centers += self.X_mean

    def init_centroids(self, X):
        """Run k-means++ initialization on the GPU via pylibraft."""
        # XXX What is a better way to pass the random state to raft?
        # Draw a random int64 and shift it into the non-negative range.
        seed = self.random_state.randint(
            np.iinfo(np.int64).min, np.iinfo(np.int64).max
        ) + abs(np.iinfo(np.int64).min)
        initial_centroids = init_plus_plus(
            X,
            self.estimator.n_clusters,
            seed=seed,
        )
        return cp.asarray(initial_centroids)

    def count_distinct_clusters(self, cluster_labels):
        """Return the number of cluster centers (assumed distinct)."""
        # XXX need to check that the clusters are actually unique
        # XXX cp.unique() doesn't support the axis arg yet :(
        return cluster_labels.shape[0]

    def prepare_prediction(self, X, sample_weight=None):
        """Move prediction inputs onto the GPU as CuPy arrays."""
        X = cp.asarray(X)
        if sample_weight is not None:
            sample_weight = cp.asarray(sample_weight)
        return X, sample_weight

    def get_labels(self, X, sample_weight):
        """Assign each sample in ``X`` to its nearest fitted cluster center."""
        X = cp.asarray(X)
        labels = pairwise_distances_argmin(
            X,
            self.estimator.cluster_centers_,
        )
        # pairwise_distances_argmin returns shape (n_samples, 1); flatten.
        return labels.ravel()

    def is_same_clustering(self, labels, best_labels, n_clusters):
        """Delegate to the module-level `_is_same_clustering` helper."""
        return _is_same_clustering(labels, best_labels, n_clusters)

    def kmeans_single(self, X, sample_weight, current_centroids):
        """Run one full Lloyd iteration loop from the given initial centroids.

        Returns ``(labels, inertia, centroids, n_iter)``. Assumes
        ``max_iter >= 1`` (with ``max_iter == 0`` the loop never runs and
        ``labels`` stays ``None``).
        """
        labels = None
        new_centroids = cp.zeros_like(current_centroids)
        for n_iter in range(self.estimator.max_iter):
            # E(xpectation)
            new_labels = pairwise_distances_argmin(
                X,
                current_centroids,
            )
            # M(aximization)
            # Compute the new centroids using the weighted sum of points in each cluster
            compute_centroids_(
                X,
                sample_weight,
                new_labels,
                current_centroids,
                new_centroids,
            )
            # Convergence checks: label stability after the first iteration,
            # centroid shift on the first iteration only.
            # NOTE(review): the tol-based shift check runs only when
            # n_iter == 0; afterwards convergence relies solely on label
            # equality -- confirm this is intended.
            if n_iter > 0:
                if cp.array_equal(labels, new_labels):
                    break
            else:
                center_shift = cp.power(current_centroids - new_centroids, 2).sum()
                if center_shift <= self.tol:
                    break
            # NOTE(review): after this assignment `current_centroids` and
            # `new_centroids` alias the same array, so the next
            # compute_centroids_ call reads and writes the same buffer --
            # verify pylibraft's compute_new_centroids tolerates this.
            current_centroids = new_centroids
            labels = new_labels
        labels = labels.ravel()
        # Weighted sum of squared distances of each point to its own center.
        inertia = 0.0
        for n, center in enumerate(current_centroids):
            inertia += (
                cp.power(X[labels == n] - center, 2).sum(1) * sample_weight[labels == n]
            ).sum()
        return labels, inertia, current_centroids, n_iter + 1
| 0 |
# Build environment producing stable cuGraph + PyTorch images consumed by DGL CI.
ARG CUDA_VER=11.8
ARG LINUX_VER=ubuntu20.04
FROM gpuci/miniforge-cuda:$CUDA_VER-devel-$LINUX_VER
# Versions of the Python / RAPIDS / PyTorch stack baked into the image.
ARG PYTHON_VER=3.10
ARG RAPIDS_VER=23.04
ARG PYTORCH_VER=2.0.0
ARG CUDATOOLKIT_VER=11.8
# NOTE(review): disabling SSL verification weakens supply-chain security;
# presumably required behind an intercepting proxy -- confirm before keeping.
RUN conda config --set ssl_verify false
RUN conda install -c gpuci gpuci-tools
# mamba resolves the large RAPIDS dependency graph much faster than conda.
RUN gpuci_conda_retry install -c conda-forge mamba
# Install the RAPIDS graph stack plus PyTorch and common build/test tooling.
RUN gpuci_mamba_retry install -y -c pytorch -c rapidsai -c conda-forge -c nvidia \
    cudf=$RAPIDS_VER \
    cugraph=$RAPIDS_VER \
    dask-cudf=$RAPIDS_VER \
    dask-cuda=$RAPIDS_VER \
    pylibcugraphops=$RAPIDS_VER \
    pytorch=$PYTORCH_VER \
    pytorch-cuda=$CUDATOOLKIT_VER \
    python=$PYTHON_VER \
    setuptools \
    scipy \
    networkx \
    requests \
    cmake \
    make \
    tqdm
# Optional steps to build DGL from source inside the image (kept for reference).
# RUN cd / && git clone https://github.com/dmlc/dgl.git
# RUN cd / && cd dgl && git submodule update --init --recursive
# RUN cd /dgl && mkdir build && cd build && cmake -DUSE_CUDA=ON -DUSE_NCCL=ON -DBUILD_CPP_TEST=ON .. && make -j64
rapidsai_public_repos | rapidsai_public_repos/dgl-cugraph-build-environment/README.md | # Dgl Cugraph Build Environment
This repo is responsible for building stable docker images that will be consumed by DGL for their CI.
Docker Images Link: https://hub.docker.com/r/rapidsai/cugraph_stable_torch-cuda/
| 0 |
rapidsai_public_repos | rapidsai_public_repos/gpu-bdb/README.md | # RAPIDS GPU-BDB
## Disclaimer
gpu-bdb is derived from [TPCx-BB](http://www.tpc.org/tpcx-bb/). Any results based on gpu-bdb are considered unofficial results, and per [TPC](http://www.tpc.org/) policy, cannot be compared against official TPCx-BB results.
## Overview
The GPU Big Data Benchmark (gpu-bdb) is a [RAPIDS](https://rapids.ai) library based benchmark for enterprises that includes 30 queries representing real-world ETL & ML workflows at various "scale factors": SF1000 is 1 TB of data, SF10000 is 10TB. Each “query” is in fact a model workflow that can include SQL, user-defined functions, careful sub-setting and aggregation, and machine learning.
## Conda Environment Setup
We provide a conda environment definition specifying all RAPIDS dependencies needed to run our query implementations. To install and activate it:
```bash
CONDA_ENV="rapids-gpu-bdb"
conda env create --name $CONDA_ENV -f gpu-bdb/conda/rapids-gpu-bdb.yml
conda activate rapids-gpu-bdb
```
### Installing RAPIDS bdb Tools
This repository includes a small local module containing utility functions for running the queries. You can install it with the following:
```bash
cd gpu-bdb/gpu_bdb
python -m pip install .
```
This will install a package named `bdb-tools` into your Conda environment. It should look like this:
```bash
conda list | grep bdb
bdb-tools 0.2 pypi_0 pypi
```
Note that this Conda environment needs to be replicated or installed manually on all nodes, which will allow starting one dask-cuda-worker per node.
## NLP Query Setup
Queries 10, 18, and 19 depend on two static (negativeSentiment.txt, positiveSentiment.txt) files. As we cannot redistribute those files, you should [download the tpcx-bb toolkit](http://www.tpc.org/tpc_documents_current_versions/download_programs/tools-download-request5.asp?bm_type=TPCX-BB&bm_vers=1.3.1&mode=CURRENT-ONLY) and extract them to your data directory on your shared filesystem:
```
jar xf bigbenchqueriesmr.jar
cp tpcx-bb1.3.1/distributions/Resources/io/bigdatabenchmark/v1/queries/q10/*.txt ${DATA_DIR}/sentiment_files/
```
For Query 27, we rely on [spacy](https://spacy.io/). To download the necessary language model after activating the Conda environment:
```bash
python -m spacy download en_core_web_sm
```
## Starting Your Cluster
We use the `dask-scheduler` and `dask-cuda-worker` command line interfaces to start a Dask cluster. We provide a `cluster_configuration` directory with a bash script to help you set up an NVLink-enabled cluster using UCX.
Before running the script, you'll make changes specific to your environment.
In `cluster_configuration/cluster-startup.sh`:
- Update `GPU_BDB_HOME=...` to location on disk of this repo
- Update `CONDA_ENV_PATH=...` to refer to your conda environment path.
- Update `CONDA_ENV_NAME=...` to refer to the name of the conda environment you created, perhaps using the `yml` files provided in this repository.
- Update `INTERFACE=...` to refer to the relevant network interface present on your cluster.
- Update `CLUSTER_MODE="TCP"` to refer to your communication method, either "TCP" or "NVLINK". You can also configure this as an environment variable.
- You may also need to change the `LOCAL_DIRECTORY` and `WORKER_DIR` depending on your filesystem. Make sure that these point to a location to which you have write access and that `LOCAL_DIRECTORY` is accessible from all nodes.
To start up the cluster on your scheduler node, please run the following from `gpu_bdb/cluster_configuration/`. This will spin up a scheduler and one Dask worker per GPU.
```bash
DASK_JIT_UNSPILL=True CLUSTER_MODE=NVLINK bash cluster-startup.sh SCHEDULER
```
Note: Don't use DASK_JIT_UNSPILL when running BlazingSQL queries.
Then run the following on every other node from `gpu_bdb/cluster_configuration/`.
```bash
bash cluster-startup.sh
```
This will spin up one Dask worker per GPU. If you are running on a single node, you will only need to run `bash cluster-startup.sh SCHEDULER`.
If you are using a Slurm cluster, please adapt the example Slurm setup in `gpu_bdb/benchmark_runner/slurm/` which uses `gpu_bdb/cluster_configuration/cluster-startup-slurm.sh`.
## Running the Queries
To run a query, starting from the repository root, go to the query specific subdirectory. For example, to run q07:
```bash
cd gpu_bdb/queries/q07/
```
The queries assume that they can attach to a running Dask cluster. Cluster address and other benchmark configuration live in a yaml file (`gpu_bdb/benchmark_runner/benchmark_config.yaml`). You will need to fill this out as appropriate if you are not using the Slurm cluster configuration.
```bash
conda activate rapids-gpu-bdb
python gpu_bdb_query_07.py --config_file=../../benchmark_runner/benchmark_config.yaml
```
To NSYS profile a gpu-bdb query, change `start_local_cluster` in benchmark_config.yaml to `True` and run:
```bash
nsys profile -t cuda,nvtx python gpu_bdb_query_07_dask_sql.py --config_file=../../benchmark_runner/benchmark_config.yaml
```
Note: There is no need to start workers with `cluster-startup.sh` as
there is a `LocalCUDACluster` being started in `attach_to_cluster` API.
## Performance Tracking
This repository includes optional performance-tracking automation using Google Sheets. To enable logging query runtimes, on the client node:
```
export GOOGLE_SHEETS_CREDENTIALS_PATH=<path to creds.json>
```
Then configure the `--sheet` and `--tab` arguments in `benchmark_config.yaml`.
### Running all of the Queries
The included `benchmark_runner.py` script will run all queries sequentially. Configuration for this type of end-to-end run is specified in `benchmark_runner/benchmark_config.yaml`.
To run all queries, cd to `gpu_bdb/` and:
```python
python benchmark_runner.py --config_file benchmark_runner/benchmark_config.yaml
```
By default, this will run each Dask query five times, and, if BlazingSQL queries are enabled in `benchmark_config.yaml`, each BlazingSQL query five times. You can control the number of repeats by changing the `N_REPEATS` variable in the script.
## BlazingSQL
BlazingSQL implementations of all queries are included. BlazingSQL currently supports communication via TCP. To run BlazingSQL queries, please follow the instructions above to create a cluster using `CLUSTER_MODE=TCP`.
## Data Generation
The RAPIDS queries expect [Apache Parquet](http://parquet.apache.org/) formatted data. We provide a [script](gpu_bdb/queries/load_test/gpu_bdb_load_test.py) which can be used to convert bigBench dataGen's raw CSV files to optimally sized Parquet partitions.
| 0 |
rapidsai_public_repos | rapidsai_public_repos/gpu-bdb/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2020 NVIDIA Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
# Conda environment for running GPU-BDB with dask-sql on CUDA 11.2.
channels:
  - rapidsai-nightly
  - nvidia
  - conda-forge
dependencies:
  - python=3.8
  - cudatoolkit=11.2
  - cudf
  - rmm
  - dask-cuda
  - dask-cudf
  - cuml
  - dask
  - distributed
  - ucx-py
  - ucx-proc=*=gpu
  - dask-sql>=2022.1
  # NOTE(review): ">=0.54.*" mixes a range operator with a wildcard --
  # conda tolerates it, but ">=0.54" is the conventional spelling; confirm.
  - numba>=0.54.*
  - scipy
  - scikit-learn
  - cupy
  - spacy=2.3
  - oauth2client
  - asyncssh
  - psutil
  - ipykernel
  - jupyterlab
  - gspread
  - pytest
  - pip
  # pip-only packages, installed after the conda solve.
  - pip:
      - jupyter-server-proxy
# Conda environment for running GPU-BDB (BlazingSQL variant) on CUDA 11.0.
channels:
  - rapidsai-nightly
  - nvidia
  - conda-forge
dependencies:
  - python=3.8
  - cudatoolkit=11.0
  - cudf
  - rmm
  - dask-cuda
  - dask-cudf
  - cuml
  - dask
  - distributed
  - ucx-py
  - ucx-proc=*=gpu
  - blazingsql
  - scipy
  - scikit-learn
  - cupy
  - spacy=2.3
  # fix: oauth2client was previously listed twice in this file.
  - oauth2client
  - asyncssh
  - psutil
  - ipykernel
  - jupyterlab
  - gspread
  - pytest
  - pip
  # pip-only packages, installed after the conda solve.
  - pip:
      - jupyter-server-proxy
rapidsai_public_repos/gpu-bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/benchmark_runner.py | import glob
import re
import os
import gc
import time
import uuid
import sys
# Number of times each query is executed per end-to-end benchmark run.
N_REPEATS = 5
def get_qnum_from_filename(name):
    """Extract the two-digit query number embedded in *name* (e.g. "07")."""
    # Raises AttributeError if no two consecutive digits are present.
    return re.search(r"[0-9]{2}", name).group()
def load_query(qnum, fn):
    """Load the query script at path *fn* as a module named *qnum* and
    return its ``main`` callable."""
    import importlib.machinery
    import types

    loader = importlib.machinery.SourceFileLoader(qnum, fn)
    module = types.ModuleType(loader.name)
    loader.exec_module(module)
    return module.main
def prevent_sql_import(dask_sql_modules):
    """Block future imports of dask_sql submodules on a worker.

    Each listed module name is pre-seeded in ``sys.modules`` with a
    placeholder object (the ``sys`` module itself), so a later
    ``import dask_sql`` never executes the real package on the worker.
    """
    for name in dask_sql_modules:
        sys.modules[name] = sys
def is_jvm_started():
    """Report whether the jpype JVM is already running on this worker."""
    from jpype import isJVMStarted

    return isJVMStarted()
# Zero-padded query numbers ("01".."30") for the pure-Dask and dask-sql runs.
dask_qnums = [str(i).zfill(2) for i in range(1, 31)]
sql_qnums = [str(i).zfill(2) for i in range(1, 31)]
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster, import_query_libs
    from bdb_tools.utils import run_query, gpubdb_argparser
    import_query_libs()
    config = gpubdb_argparser()
    # Unique id so all results from this invocation can be grouped downstream.
    config["run_id"] = uuid.uuid4().hex
    include_sql = config.get("benchmark_runner_include_sql")
    # Pre-load every query's main() once so repeated runs skip import cost.
    dask_queries = {
        qnum: load_query(qnum, f"queries/q{qnum}/gpu_bdb_query_{qnum}.py")
        for qnum in dask_qnums
    }
    if include_sql:
        sql_queries = {
            qnum: load_query(qnum, f"queries/q{qnum}/gpu_bdb_query_{qnum}_dask_sql.py")
            for qnum in sql_qnums
        }
    client, c = attach_to_cluster(config, create_sql_context=include_sql)
    # Preload required libraries for queries on all workers
    try:
        import dask_sql
    except ImportError:
        dask_sql = None
    if dask_sql is not None:
        # Keep workers from importing dask_sql (and starting a JVM) themselves.
        dask_sql_modules = [m for m in sys.modules if m.startswith('dask_sql')]
        client.run(prevent_sql_import, dask_sql_modules)
        print("is_jvm_started", client.run(is_jvm_started))
    client.run(import_query_libs)
    base_path = os.getcwd()
    # Run Dask SQL Queries
    if include_sql and len(sql_qnums) > 0:
        print("Dask SQL Queries")
        for r in range(N_REPEATS):
            for qnum, q_func in sql_queries.items():
                print(f"run {r+1}: q{qnum}")
                qpath = f"{base_path}/queries/q{qnum}/"
                os.chdir(qpath)
                # Record the current query number for tooling that reads it.
                if os.path.exists("current_query_num.txt"):
                    os.remove("current_query_num.txt")
                with open("current_query_num.txt", "w") as fp:
                    fp.write(qnum)
                run_query(
                    config=config,
                    client=client,
                    query_func=q_func,
                    sql_context=c,
                )
                # Force a GC sweep on workers, scheduler and client between
                # queries to keep memory fragmentation from skewing timings.
                client.run(gc.collect)
                client.run_on_scheduler(gc.collect)
                gc.collect()
                time.sleep(3)
    # Run Pure Dask Queries
    if len(dask_qnums) > 0:
        print("Pure Dask Queries")
        for r in range(N_REPEATS):
            for qnum, q_func in dask_queries.items():
                print(f"run {r+1}: q{qnum}")
                qpath = f"{base_path}/queries/q{qnum}/"
                os.chdir(qpath)
                if os.path.exists("current_query_num.txt"):
                    os.remove("current_query_num.txt")
                with open("current_query_num.txt", "w") as fp:
                    fp.write(qnum)
                run_query(config=config, client=client, query_func=q_func)
                client.run(gc.collect)
                client.run_on_scheduler(gc.collect)
                gc.collect()
                time.sleep(3)
| 0 |
rapidsai_public_repos/gpu-bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/setup.py | # Copyright (c) 2020, NVIDIA CORPORATION.
from setuptools import find_packages, setup
qnums = [str(i).zfill(2) for i in range(1, 31)]
setup(
name="bdb_tools",
version="0.2",
author="RAPIDS",
packages=["benchmark_runner", "bdb_tools"],
package_data={"benchmark_runner": ["benchmark_config.yaml"]},
include_package_data=True,
)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/web_returns.schema | wr_returned_date_sk bigint
, wr_returned_time_sk bigint
, wr_item_sk bigint --not null
, wr_refunded_customer_sk bigint
, wr_refunded_cdemo_sk bigint
, wr_refunded_hdemo_sk bigint
, wr_refunded_addr_sk bigint
, wr_returning_customer_sk bigint
, wr_returning_cdemo_sk bigint
, wr_returning_hdemo_sk bigint
, wr_returning_addr_sk bigint
, wr_web_page_sk bigint
, wr_reason_sk bigint
, wr_order_number bigint --not null
, wr_return_quantity int
, wr_return_amt decimal(7,2)
, wr_return_tax decimal(7,2)
, wr_return_amt_inc_tax decimal(7,2)
, wr_fee decimal(7,2)
, wr_return_ship_cost decimal(7,2)
, wr_refunded_cash decimal(7,2)
, wr_reversed_charge decimal(7,2)
, wr_account_credit decimal(7,2)
, wr_net_loss decimal(7,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/warehouse.schema | w_warehouse_sk bigint --not null
, w_warehouse_id string --not null
, w_warehouse_name string
, w_warehouse_sq_ft int
, w_street_number string
, w_street_name string
, w_street_type string
, w_suite_number string
, w_city string
, w_county string
, w_state string
, w_zip string
, w_country string
, w_gmt_offset decimal(5,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/web_clickstreams.schema | wcs_click_date_sk bigint
, wcs_click_time_sk bigint
, wcs_sales_sk bigint
, wcs_item_sk bigint
, wcs_web_page_sk bigint
, wcs_user_sk bigint | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/customer_address.schema | ca_address_sk bigint --not null
, ca_address_id string --not null
, ca_street_number string
, ca_street_name string
, ca_street_type string
, ca_suite_number string
, ca_city string
, ca_county string
, ca_state string
, ca_zip string
, ca_country string
, ca_gmt_offset decimal(5,2)
, ca_location_type string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/customer.schema | c_customer_sk bigint --not null
, c_customer_id string --not null
, c_current_cdemo_sk bigint
, c_current_hdemo_sk bigint
, c_current_addr_sk bigint
, c_first_shipto_date_sk bigint
, c_first_sales_date_sk bigint
, c_salutation string
, c_first_name string
, c_last_name string
, c_preferred_cust_flag string
, c_birth_day int
, c_birth_month int
, c_birth_year int
, c_birth_country string
, c_login string
, c_email_address string
, c_last_review_date string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/item.schema | i_item_sk bigint --not null
, i_item_id string --not null
, i_rec_start_date string
, i_rec_end_date string
, i_item_desc string
, i_current_price decimal(7,2)
, i_wholesale_cost decimal(7,2)
, i_brand_id int
, i_brand string
, i_class_id int
, i_class string
, i_category_id int
, i_category string
, i_manufact_id int
, i_manufact string
, i_size string
, i_formulation string
, i_color string
, i_units string
, i_container string
, i_manager_id int
, i_product_name string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/ship_mode.schema | sm_ship_mode_sk bigint --not null
, sm_ship_mode_id string --not null
, sm_type string
, sm_code string
, sm_carrier string
, sm_contract string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/income_band.schema | ib_income_band_sk bigint --not null
, ib_lower_bound int
, ib_upper_bound int | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/household_demographics.schema | hd_demo_sk bigint --not null
, hd_income_band_sk bigint
, hd_buy_potential string
, hd_dep_count int
, hd_vehicle_count int | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/web_page.schema | wp_web_page_sk bigint --not null
, wp_web_page_id string --not null
, wp_rec_start_date string
, wp_rec_end_date string
, wp_creation_date_sk bigint
, wp_access_date_sk bigint
, wp_autogen_flag string
, wp_customer_sk bigint
, wp_url string
, wp_type string
, wp_char_count int
, wp_link_count int
, wp_image_count int
, wp_max_ad_count int | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/web_sales.schema | ws_sold_date_sk bigint
, ws_sold_time_sk bigint
, ws_ship_date_sk bigint
, ws_item_sk bigint --not null
, ws_bill_customer_sk bigint
, ws_bill_cdemo_sk bigint
, ws_bill_hdemo_sk bigint
, ws_bill_addr_sk bigint
, ws_ship_customer_sk bigint
, ws_ship_cdemo_sk bigint
, ws_ship_hdemo_sk bigint
, ws_ship_addr_sk bigint
, ws_web_page_sk bigint
, ws_web_site_sk bigint
, ws_ship_mode_sk bigint
, ws_warehouse_sk bigint
, ws_promo_sk bigint
, ws_order_number bigint --not null
, ws_quantity int
, ws_wholesale_cost decimal(7,2)
, ws_list_price decimal(7,2)
, ws_sales_price decimal(7,2)
, ws_ext_discount_amt decimal(7,2)
, ws_ext_sales_price decimal(7,2)
, ws_ext_wholesale_cost decimal(7,2)
, ws_ext_list_price decimal(7,2)
, ws_ext_tax decimal(7,2)
, ws_coupon_amt decimal(7,2)
, ws_ext_ship_cost decimal(7,2)
, ws_net_paid decimal(7,2)
, ws_net_paid_inc_tax decimal(7,2)
, ws_net_paid_inc_ship decimal(7,2)
, ws_net_paid_inc_ship_tax decimal(7,2)
, ws_net_profit decimal(7,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/reason.schema | r_reason_sk bigint --not null
, r_reason_id string --not null
, r_reason_desc string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/inventory.schema | inv_date_sk bigint --not null
, inv_item_sk bigint --not null
, inv_warehouse_sk bigint --not null
, inv_quantity_on_hand int | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/web_site.schema | web_site_sk bigint --not null
, web_site_id string --not null
, web_rec_start_date string
, web_rec_end_date string
, web_name string
, web_open_date_sk bigint
, web_close_date_sk bigint
, web_class string
, web_manager string
, web_mkt_id int
, web_mkt_class string
, web_mkt_desc string
, web_market_manager string
, web_company_id int
, web_company_name string
, web_street_number string
, web_street_name string
, web_street_type string
, web_suite_number string
, web_city string
, web_county string
, web_state string
, web_zip string
, web_country string
, web_gmt_offset decimal(5,2)
, web_tax_percentage decimal(5,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/promotion.schema | p_promo_sk bigint --not null
, p_promo_id string --not null
, p_start_date_sk bigint
, p_end_date_sk bigint
, p_item_sk bigint
, p_cost decimal(15,2)
, p_response_target int
, p_promo_name string
, p_channel_dmail string
, p_channel_email string
, p_channel_catalog string
, p_channel_tv string
, p_channel_radio string
, p_channel_press string
, p_channel_event string
, p_channel_demo string
, p_channel_details string
, p_purpose string
, p_discount_active string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/store_sales.schema | ss_sold_date_sk bigint
, ss_sold_time_sk bigint
, ss_item_sk bigint --not null
, ss_customer_sk bigint
, ss_cdemo_sk bigint
, ss_hdemo_sk bigint
, ss_addr_sk bigint
, ss_store_sk bigint
, ss_promo_sk bigint
, ss_ticket_number bigint --not null
, ss_quantity int
, ss_wholesale_cost decimal(7,2)
, ss_list_price decimal(7,2)
, ss_sales_price decimal(7,2)
, ss_ext_discount_amt decimal(7,2)
, ss_ext_sales_price decimal(7,2)
, ss_ext_wholesale_cost decimal(7,2)
, ss_ext_list_price decimal(7,2)
, ss_ext_tax decimal(7,2)
, ss_coupon_amt decimal(7,2)
, ss_net_paid decimal(7,2)
, ss_net_paid_inc_tax decimal(7,2)
, ss_net_profit decimal(7,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/item_marketprices.schema | imp_sk bigint --not null
, imp_item_sk bigint --not null
, imp_competitor string
, imp_competitor_price decimal(7,2)
, imp_start_date bigint
, imp_end_date bigint | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/time_dim.schema | t_time_sk bigint --not null
, t_time_id string --not null
, t_time int
, t_hour int
, t_minute int
, t_second int
, t_am_pm string
, t_shift string
, t_sub_shift string
, t_meal_time string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/store.schema | s_store_sk bigint --not null
, s_store_id string --not null
, s_rec_start_date string
, s_rec_end_date string
, s_closed_date_sk bigint
, s_store_name string
, s_number_employees int
, s_floor_space int
, s_hours string
, s_manager string
, s_market_id int
, s_geography_class string
, s_market_desc string
, s_market_manager string
, s_division_id int
, s_division_name string
, s_company_id int
, s_company_name string
, s_street_number string
, s_street_name string
, s_street_type string
, s_suite_number string
, s_city string
, s_county string
, s_state string
, s_zip string
, s_country string
, s_gmt_offset decimal(5,2)
, s_tax_precentage decimal(5,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/product_reviews.schema | pr_review_sk bigint --not null
, pr_review_date string
, pr_review_time string
, pr_review_rating int --not null
, pr_item_sk bigint --not null
, pr_user_sk bigint
, pr_order_sk bigint
, pr_review_content string --not null | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/date_dim.schema | d_date_sk bigint --not null
, d_date_id string --not null
, d_date string
, d_month_seq int
, d_week_seq int
, d_quarter_seq int
, d_year int
, d_dow int
, d_moy int
, d_dom int
, d_qoy int
, d_fy_year int
, d_fy_quarter_seq int
, d_fy_week_seq int
, d_day_name string
, d_quarter_name string
, d_holiday string
, d_weekend string
, d_following_holiday string
, d_first_dom int
, d_last_dom int
, d_same_day_ly int
, d_same_day_lq int
, d_current_day string
, d_current_week string
, d_current_month string
, d_current_quarter string
, d_current_year string | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/store_returns.schema | sr_returned_date_sk bigint
, sr_return_time_sk bigint
, sr_item_sk bigint --not null
, sr_customer_sk bigint
, sr_cdemo_sk bigint
, sr_hdemo_sk bigint
, sr_addr_sk bigint
, sr_store_sk bigint
, sr_reason_sk bigint
, sr_ticket_number bigint --not null
, sr_return_quantity int
, sr_return_amt decimal(7,2)
, sr_return_tax decimal(7,2)
, sr_return_amt_inc_tax decimal(7,2)
, sr_fee decimal(7,2)
, sr_return_ship_cost decimal(7,2)
, sr_refunded_cash decimal(7,2)
, sr_reversed_charge decimal(7,2)
, sr_store_credit decimal(7,2)
, sr_net_loss decimal(7,2) | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/spark_table_schemas/customer_demographics.schema | cd_demo_sk bigint ----not null
, cd_gender string
, cd_marital_status string
, cd_education_status string
, cd_purchase_estimate int
, cd_credit_rating string
, cd_dep_count int
, cd_dep_employed_count int
, cd_dep_college_count int | 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/__init__.py | # Copyright (c) 2020, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q03/gpu_bdb_query_03.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q03_utils import (
apply_find_items_viewed,
q03_purchased_item_IN,
q03_purchased_item_category_IN,
q03_limit,
read_tables
)
from distributed import wait
import numpy as np
import glob
from dask import delayed
def get_wcs_minima(config):
    """Return the minimum derived click timestamp across web_clickstreams.

    The timestamp is ``wcs_click_date_sk * 86400 + wcs_click_time_sk``
    (seconds); the minimum is used later to rebase timestamps so they fit
    into int32.
    """
    clickstream = dask_cudf.read_parquet(
        os.path.join(config["data_dir"], "web_clickstreams/*.parquet"),
        columns=["wcs_click_date_sk", "wcs_click_time_sk"],
    )
    clickstream["tstamp"] = (
        clickstream["wcs_click_date_sk"] * 86400 + clickstream["wcs_click_time_sk"]
    )
    return clickstream["tstamp"].min().compute()
def pre_repartition_task(wcs_fn, item_df, wcs_tstamp_min):
    """Read one web_clickstreams parquet file, join it to *item_df* and
    shrink dtypes.

    Rows lacking a user or item key are dropped; the derived timestamp is
    rebased by *wcs_tstamp_min* so it fits in int32.  Only the columns
    needed downstream are returned.
    """
    read_cols = [
        "wcs_user_sk",
        "wcs_sales_sk",
        "wcs_item_sk",
        "wcs_click_date_sk",
        "wcs_click_time_sk",
    ]
    clicks = cudf.read_parquet(wcs_fn, columns=read_cols)
    clicks = clicks.dropna(axis=0, subset=["wcs_user_sk", "wcs_item_sk"])
    # Rebase the derived timestamp so the values are small enough for int32.
    clicks["tstamp"] = clicks["wcs_click_date_sk"] * 86400 + clicks["wcs_click_time_sk"]
    clicks["tstamp"] = clicks["tstamp"] - wcs_tstamp_min
    clicks["tstamp"] = clicks["tstamp"].astype("int32")
    for col in ("wcs_user_sk", "wcs_sales_sk", "wcs_item_sk"):
        clicks[col] = clicks[col].astype("int32")
    merged = clicks.merge(
        item_df, left_on=["wcs_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    del clicks
    del item_df
    merged = merged[
        ["wcs_user_sk", "tstamp", "wcs_item_sk", "wcs_sales_sk", "i_category_id"]
    ]
    for col in ("wcs_user_sk", "wcs_sales_sk", "wcs_item_sk"):
        merged[col] = merged[col].astype("int32")
    # A null sales_sk means a view without a purchase; encode it as 0.
    merged["wcs_sales_sk"] = merged.wcs_sales_sk.fillna(0)
    return merged
def reduction_function(df, item_df_filtered):
    """Per-partition reduction: run the "items viewed before purchase"
    search on *df* and count how often each surviving item was viewed.
    """
    viewed = apply_find_items_viewed(df, item_mappings=item_df_filtered)
    counts = viewed.groupby(["i_item_sk"]).size().reset_index()
    counts.columns = ["i_item_sk", "cnt"]
    return counts
def main(client, config):
    """Query 3: for purchases of item ``q03_purchased_item_IN``, count which
    items from the relevant categories were viewed in the preceding clicks
    and return the top ``q03_limit`` (purchased_item, lastviewed_item, cnt)
    rows.
    """
    item_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    wcs_tstamp_min = get_wcs_minima(config)
    # Shrink dtypes early to reduce memory pressure downstream.
    item_df["i_item_sk"] = item_df["i_item_sk"].astype("int32")
    item_df["i_category_id"] = item_df["i_category_id"].astype("int8")
    # we eventually will only care about these categories, so we can filter now
    item_df_filtered = item_df.loc[
        item_df.i_category_id.isin(q03_purchased_item_category_IN)
    ].reset_index(drop=True)
    # NOTE: the filtration task is deliberately not fused with the read task;
    # fusing would read (and spill) everything at once before filtering.
    web_clickstream_flist = glob.glob(os.path.join(config["data_dir"], "web_clickstreams/*.parquet"))
    # One delayed read+join task per clickstream parquet file.
    task_ls = [
        delayed(pre_repartition_task)(fn, item_df.to_delayed()[0], wcs_tstamp_min)
        for fn in web_clickstream_flist
    ]
    # Explicit meta so from_delayed does not have to execute a task to
    # infer the output schema.
    meta_d = {
        "wcs_user_sk": np.ones(1, dtype=np.int32),
        "tstamp": np.ones(1, dtype=np.int32),
        "wcs_item_sk": np.ones(1, dtype=np.int32),
        "wcs_sales_sk": np.ones(1, dtype=np.int32),
        "i_category_id": np.ones(1, dtype=np.int8),
    }
    meta_df = cudf.DataFrame(meta_d)
    merged_df = dask_cudf.from_delayed(task_ls, meta=meta_df)
    # Co-locate each user's clicks so every partition holds whole sessions.
    merged_df = merged_df.shuffle(on="wcs_user_sk")
    meta_d = {
        "i_item_sk": np.ones(1, dtype=merged_df["wcs_item_sk"].dtype),
        "cnt": np.ones(1, dtype=merged_df["wcs_item_sk"].dtype),
    }
    meta_df = cudf.DataFrame(meta_d)
    grouped_df = merged_df.map_partitions(
        reduction_function, item_df_filtered.to_delayed()[0], meta=meta_df
    )
    ### todo: check if this has any impact on stability
    grouped_df = grouped_df.persist(priority=10000)
    ### todo: remove this later after more testing
    wait(grouped_df)
    print("---" * 20)
    print("grouping complete ={}".format(len(grouped_df)))
    # Combine the per-partition counts into global counts per item.
    grouped_df = grouped_df.groupby(["i_item_sk"]).sum(split_every=2).reset_index()
    grouped_df.columns = ["i_item_sk", "cnt"]
    result_df = grouped_df.map_partitions(
        lambda df: df.sort_values(by=["cnt"], ascending=False)
    )
    result_df.columns = ["lastviewed_item", "cnt"]
    result_df["purchased_item"] = q03_purchased_item_IN
    cols_order = ["purchased_item", "lastviewed_item", "cnt"]
    result_df = result_df[cols_order]
    result_df = result_df.persist()
    ### todo: remove this later after more testing
    wait(result_df)
    print(len(result_df))
    result_df = result_df.head(q03_limit)
    print("result complete")
    print("---" * 20)
    return result_df
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    # Standalone execution: attach to an existing dask cluster and run once.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q03/gpu_bdb_query_03_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from numba import cuda
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q03 -----------
# Look-back window: clicks up to 10 days (in seconds) before the purchase.
q03_days_in_sec_before_purchase = 864000
# Number of preceding views inspected per relevant purchase.
q03_views_before_purchase = 5
# The item whose purchases anchor the analysis.
q03_purchased_item_IN = 10001
# --see q1 for categories
q03_purchased_item_category_IN = "2,3"
q03_limit = 100
@cuda.jit
def find_items_viewed_before_purchase_kernel(
    relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
    """
    Find the past N items viewed after a relevant purchase was made,
    as defined by the configuration of this query.

    Inputs are sorted descending by (user, timestamp), so the rows that
    follow a purchase row are that user's earlier clicks.  For relevant
    purchase i, output slots [i*N, (i+1)*N) receive the viewed item_sk, or
    0 when the row belongs to another user or falls outside the window.
    """
    i = cuda.grid(1)
    relevant_item = q03_purchased_item_IN  # NOTE: currently unused here
    if i < (relevant_idx_col.size):  # boundary guard
        # every relevant row gets N rows in the output, so we need to map the indexes
        # back into their position in the original array
        orig_idx = relevant_idx_col[i]
        current_user = user_col[orig_idx]
        # look at the previous N clicks (assume sorted descending)
        rows_to_check = N
        remaining_rows = user_col.size - orig_idx
        if remaining_rows <= rows_to_check:
            rows_to_check = remaining_rows - 1
        for k in range(1, rows_to_check + 1):
            if current_user != user_col[orig_idx + k]:
                out_col[i * N + k - 1] = 0
            # only checking relevant purchases via the relevant_idx_col
            elif (timestamp_col[orig_idx + k] <= timestamp_col[orig_idx]) & (
                timestamp_col[orig_idx + k]
                >= (timestamp_col[orig_idx] - q03_days_in_sec_before_purchase)
            ):
                out_col[i * N + k - 1] = item_col[orig_idx + k]
            else:
                out_col[i * N + k - 1] = 0
def apply_find_items_viewed(df, item_mappings):
    """Per-partition driver for the CUDA kernel: return the items viewed
    shortly before each relevant purchase, joined to *item_mappings* so only
    items from the relevant categories survive.
    """
    import cudf
    # need to sort descending to ensure that the
    # next N rows are the previous N clicks
    df = df.sort_values(
        by=["wcs_user_sk", "tstamp", "wcs_sales_sk", "wcs_item_sk"],
        ascending=[False, False, False, False],
    )
    df.reset_index(drop=True, inplace=True)
    # A "relevant" row is an actual sale (wcs_sales_sk != 0) of the target item.
    df["relevant_flag"] = (df.wcs_sales_sk != 0) & (
        df.wcs_item_sk == q03_purchased_item_IN
    )
    df["relevant_idx_pos"] = df.index.to_series()
    df.reset_index(drop=True, inplace=True)
    # only allocate output for the relevant rows
    sample = df.loc[df.relevant_flag == True]
    sample.reset_index(drop=True, inplace=True)
    N = q03_views_before_purchase
    size = len(sample)
    # we know this can be int32, since it's going to contain item_sks
    out_arr = cuda.device_array(size * N, dtype=df["wcs_item_sk"].dtype)
    # Launch one thread per relevant purchase; each fills N output slots.
    find_items_viewed_before_purchase_kernel.forall(size)(
        sample["relevant_idx_pos"],
        df["wcs_user_sk"],
        df["tstamp"],
        df["wcs_item_sk"],
        out_arr,
        N,
    )
    result = cudf.DataFrame({"prior_item_viewed": out_arr})
    del out_arr
    del df
    del sample
    # Inner join keeps only items present in item_mappings; the 0 sentinels
    # written by the kernel drop out here too (assuming no item_sk of 0 --
    # TODO confirm against the data generator).
    filtered = result.merge(
        item_mappings,
        how="inner",
        left_on=["prior_item_viewed"],
        right_on=["i_item_sk"],
    )
    return filtered
def read_tables(data_dir, bc):
    """Register the two tables this query needs with the BlazingContext."""
    for table_name in ("web_clickstreams", "item"):
        bc.create_table(
            table_name, os.path.join(data_dir, f"{table_name}/*.parquet")
        )
def main(data_dir, client, bc, config):
    """Query 3 (BlazingSQL flavor): compute the most common items viewed
    before purchases of ``q03_purchased_item_IN``, restricted to categories
    ``q03_purchased_item_category_IN``; return the top ``q03_limit`` rows.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # Narrow item projection with a compact category dtype.
    query_1 = """
        SELECT i_item_sk,
            CAST(i_category_id AS TINYINT) AS i_category_id
        FROM item
    """
    item_df = bc.sql(query_1)
    item_df = item_df.persist()
    wait(item_df)
    bc.create_table("item_df", item_df)
    # Clickstream rows for known users/items, ordered by user so each
    # partition holds contiguous user sessions for the kernel pass.
    query_2 = """
        SELECT CAST(w.wcs_user_sk AS INTEGER) as wcs_user_sk,
            wcs_click_date_sk * 86400 + wcs_click_time_sk AS tstamp,
            CAST(w.wcs_item_sk AS INTEGER) as wcs_item_sk,
            CAST(COALESCE(w.wcs_sales_sk, 0) AS INTEGER) as wcs_sales_sk
        FROM web_clickstreams AS w
        INNER JOIN item_df AS i ON w.wcs_item_sk = i.i_item_sk
        WHERE w.wcs_user_sk IS NOT NULL
        AND w.wcs_item_sk IS NOT NULL
        ORDER BY w.wcs_user_sk
    """
    merged_df = bc.sql(query_2)
    # Items in the categories of interest only.
    query_3 = f"""
        SELECT i_item_sk, i_category_id
        FROM item_df
        WHERE i_category_id IN ({q03_purchased_item_category_IN})
    """
    item_df_filtered = bc.sql(query_3)
    # GPU kernel pass per partition: items viewed before each purchase.
    product_view_results = merged_df.map_partitions(
        apply_find_items_viewed, item_mappings=item_df_filtered
    )
    product_view_results = product_view_results.persist()
    wait(product_view_results)
    bc.drop_table("item_df")
    del item_df
    del merged_df
    del item_df_filtered
    bc.create_table('product_result', product_view_results)
    # Final aggregation: count views per item, keep the top q03_limit.
    last_query = f"""
        SELECT CAST({q03_purchased_item_IN} AS BIGINT) AS purchased_item,
            i_item_sk AS lastviewed_item,
            COUNT(i_item_sk) AS cnt
        FROM product_result
        GROUP BY i_item_sk
        ORDER BY purchased_item, cnt desc, lastviewed_item
        LIMIT {q03_limit}
    """
    result = bc.sql(last_query)
    bc.drop_table("product_result")
    del product_view_results
    return result
if __name__ == "__main__":
    # Standalone execution: build a BlazingContext and run this query once.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q03/gpu_bdb_query_03_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q03_utils import (
apply_find_items_viewed,
q03_purchased_item_IN,
q03_purchased_item_category_IN,
q03_limit,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Query 3 (dask-sql flavor): compute the most common items viewed before
    purchases of ``q03_purchased_item_IN``, restricted to categories
    ``q03_purchased_item_category_IN``; return the top ``q03_limit`` rows.
    """
    benchmark(read_tables, config, c)
    # Narrow item projection with a compact category dtype.
    query_1 = """
        SELECT i_item_sk,
            CAST(i_category_id AS TINYINT) AS i_category_id
        FROM item
    """
    item_df = c.sql(query_1)
    item_df = item_df.persist()
    wait(item_df)
    c.create_table("item_df", item_df, persist=False)
    # Clickstream rows for known users/items; DISTRIBUTE BY keeps each
    # user's clicks in one partition for the kernel pass.
    query_2 = """
        SELECT CAST(w.wcs_user_sk AS INTEGER) as wcs_user_sk,
            wcs_click_date_sk * 86400 + wcs_click_time_sk AS tstamp,
            CAST(w.wcs_item_sk AS INTEGER) as wcs_item_sk,
            CAST(COALESCE(w.wcs_sales_sk, 0) AS INTEGER) as wcs_sales_sk
        FROM web_clickstreams AS w
        INNER JOIN item_df AS i ON w.wcs_item_sk = i.i_item_sk
        WHERE w.wcs_user_sk IS NOT NULL
        AND w.wcs_item_sk IS NOT NULL
        DISTRIBUTE BY wcs_user_sk
    """
    merged_df = c.sql(query_2)
    # Items in the categories of interest only.
    query_3 = f"""
        SELECT i_item_sk, i_category_id
        FROM item_df
        WHERE i_category_id IN {q03_purchased_item_category_IN}
    """
    item_df_filtered = c.sql(query_3)
    # GPU kernel pass per partition: items viewed before each purchase.
    product_view_results = merged_df.map_partitions(
        apply_find_items_viewed, item_mappings=item_df_filtered
    )
    c.drop_table("item_df")
    del item_df
    del merged_df
    del item_df_filtered
    c.create_table('product_result', product_view_results, persist=False)
    # Final aggregation: count views per item, keep the top q03_limit.
    last_query = f"""
        SELECT CAST({q03_purchased_item_IN} AS BIGINT) AS purchased_item,
            i_item_sk AS lastviewed_item,
            COUNT(i_item_sk) AS cnt
        FROM product_result
        GROUP BY i_item_sk
        ORDER BY purchased_item, cnt desc, lastviewed_item
        LIMIT {q03_limit}
    """
    result = c.sql(last_query)
    c.drop_table("product_result")
    del product_view_results
    return result
@annotate("QUERY3", color="green", domain="gpu-bdb")
def start_run():
    # NVTX-annotated entry point so the whole query appears as one named
    # range in profiler timelines.
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q29/gpu_bdb_query_29.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q29_utils import (
q29_limit,
read_tables
)
### Implementation Notes:
# * `drop_duplicates` and `groupby` by default brings result to single partition
# * Have changed `drop_duplicates` behaviour to give `n_workers` partitions
# * This can change performance characteristics at larger scales
### Future Notes:
# Setting index + merge using map_parition can be a work-around if dask native merge is slow
# * Note: Set index issue: https://github.com/rapidsai/cudf/issues/2272
# * Implementation Idea:
# * After setting index, We can do a (inner join + group-by) with better parallelism and less data movement
### Scalability problems
# * The ws_item_join table after distincts has `48M` rows, can cause problems on bigger scale factors
q29_session_timeout_inSec = 3600
###
# Select t1.i_category_id AS category_id_1 , t2.i_category_id AS category_id_2
# FROM (
# ...
# ) t1 Inner Join
# (
# ...
# ) t2
# ON t1.ws_order_number == t2.ws_order_number
# WHERE
# t1.i_category_id<t2.i_category_id
# )
###
def get_pairs(
    df,
    merge_col="ws_order_number",
    pair_col="i_category_id",
    output_col_1="category_id_1",
    output_col_2="category_id_2",
):
    """Self-join *df* on *merge_col* and return the ordered pairs of
    *pair_col* values that co-occur, renamed to the output column names.

    Only pairs with the left value strictly smaller than the right are kept,
    so each unordered pair appears once and self-pairs are excluded.
    """
    left_col = f"{pair_col}_t1"
    right_col = f"{pair_col}_t2"
    joined = df.merge(df, on=merge_col, suffixes=["_t1", "_t2"], how="inner")
    joined = joined[[left_col, right_col]]
    mask = joined[left_col] < joined[right_col]
    return joined[mask].rename(
        columns={left_col: output_col_1, right_col: output_col_2}
    )
def main(client, config):
    """Query 29: for each pair of item categories, count how many web-sales
    orders contain items from both categories; return the top ``q29_limit``
    pairs by count.
    """
    item_df, ws_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ### setting index on ws_order_number
    ws_df = ws_df.shuffle(on=["ws_order_number"])
    ### at sf-100k we will have max of 17M rows and 17 M rows with 2 columns, 1 part is very reasonable
    item_df = item_df.repartition(npartitions=1)
    # SELECT DISTINCT i_category_id, ws_order_number
    # FROM web_sales ws, item i
    # WHERE ws.ws_item_sk = i.i_item_sk
    # AND i.i_category_id IS NOT NULL
    f_item_df = item_df[item_df["i_category_id"].notnull()]
    ### doing below to retain the `ws_order_number` partition boundary after merge
    ws_item_join = ws_df.merge(
        f_item_df, left_on=["ws_item_sk"], right_on=["i_item_sk"]
    )
    ws_item_join = ws_item_join[["i_category_id", "ws_order_number"]]
    # Per-partition distinct is sufficient because partitions are keyed on
    # ws_order_number by the shuffle above.
    ws_item_join = ws_item_join.map_partitions(lambda df: df.drop_duplicates())
    ### do pair inner join
    ### pair_df = get_pairs(ws_item_join)
    ### because of setting index we can do it in map_partitions
    ### this can have better memory and scaling props at larger scale factors
    pair_df = ws_item_join.map_partitions(get_pairs)
    # SELECT category_id_1, category_id_2, COUNT (*) AS cnt
    # FROM ( ... )
    # GROUP BY category_id_1, category_id_2
    # ORDER BY cnt DESC, category_id_1, category_id_2
    # LIMIT {q29_limit}
    grouped_df = pair_df.groupby(["category_id_1", "category_id_2"]).size().persist()
    ### 36 rows after filtration at sf-100
    ### should scale till sf-100k
    grouped_df = grouped_df.reset_index().compute()
    grouped_df.columns = ["category_id_1", "category_id_2", "cnt"]
    # NOTE(review): the next two assignments are no-ops; kept as-is.
    grouped_df["category_id_1"] = grouped_df["category_id_1"]
    grouped_df["category_id_2"] = grouped_df["category_id_2"]
    grouped_df = grouped_df.sort_values(
        by=["cnt", "category_id_1", "category_id_2"], ascending=[False, True, True]
    ).reset_index(drop=True)
    grouped_df = grouped_df.head(q29_limit)
    return grouped_df
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    # Standalone execution: attach to an existing dask cluster and run once.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q29/gpu_bdb_query_29_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q29_utils import (
q29_limit,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Q29 (dask-sql): most frequent co-occurring item-category pairs.

    The DISTINCT step is done in dask/cudf (``drop_duplicates`` with
    ``split_out``) because at large scale factors the distinct result does
    not fit on a single GPU; the pair counting is done in SQL over a
    temporary ``distinct_table``.
    """
    benchmark(read_tables, config, c)
    n_workers = len(client.scheduler_info()["workers"])
    join_query = """
        -- Removed distinct as we do it in
        -- dask_cudf based drop_duplicates with split_out
        -- 553 M rows dont fit on single GPU (int32,int64 column)
        -- TODO: Remove when we support Split Out
        -- https://github.com/dask-contrib/dask-sql/issues/241
        SELECT i_category_id, ws_order_number
        FROM web_sales ws, item i
        WHERE ws.ws_item_sk = i.i_item_sk
        AND i.i_category_id IS NOT NULL
    """
    result = c.sql(join_query)
    # Distinct calculation, spread across all workers via split_out
    result_distinct = result.drop_duplicates(split_out=n_workers, ignore_index=True)
    ## Remove the int64 index that was created
    ## TODO: raise an issue for this
    result_distinct = result_distinct.reset_index(drop=True)
    ### Persisting because the ORDER BY below triggers execution
    c.create_table('distinct_table', result_distinct, persist=True)
    query = f"""
        SELECT category_id_1, category_id_2, COUNT (*) AS cnt
        FROM
        (
            SELECT CAST(t1.i_category_id as BIGINT) AS category_id_1,
                CAST(t2.i_category_id as BIGINT) AS category_id_2
            FROM distinct_table t1
            INNER JOIN distinct_table t2
            ON t1.ws_order_number = t2.ws_order_number
            WHERE t1.i_category_id < t2.i_category_id
        )
        GROUP BY category_id_1, category_id_2
        ORDER BY cnt DESC, category_id_1, category_id_2
        LIMIT {q29_limit}
    """
    result = c.sql(query)
    result = result.persist()
    wait(result)  # removed stray trailing semicolon
    c.drop_table("distinct_table")
    return result
# NVTX range so this query shows up as "QUERY29" in profiler timelines.
@annotate("QUERY29", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, and run Q29."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q29/gpu_bdb_query_29_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q29 -----------
q29_limit = 100
def read_tables(data_dir, bc):
    """Register the q29 input parquet tables with the BlazingContext.

    Each table is created from the ``<table>/*.parquet`` files under
    ``data_dir``.
    """
    for table in ("item", "web_sales"):
        bc.create_table(table, os.path.join(data_dir, table + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Q29 (BlazingSQL): most frequent co-occurring item-category pairs.

    Materializes the distinct (category, order) combinations into a temp
    table, self-joins it on order number to form category pairs, and returns
    the ``q29_limit`` most frequent pairs.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # Distinct (i_category_id, ws_order_number) pairs from the join.
    query_distinct = """
        SELECT DISTINCT i_category_id, ws_order_number
        FROM web_sales ws, item i
        WHERE ws.ws_item_sk = i.i_item_sk
        AND i.i_category_id IS NOT NULL
    """
    result_distinct = bc.sql(query_distinct)
    # Persist and wait so the temp table is fully materialized before the
    # self-join below.
    result_distinct = result_distinct.persist()
    wait(result_distinct)
    bc.create_table('distinct_table', result_distinct)
    # t1.i_category_id < t2.i_category_id avoids self-pairs and counts each
    # unordered pair exactly once.
    query = f"""
        SELECT category_id_1, category_id_2, COUNT (*) AS cnt
        FROM
        (
            SELECT CAST(t1.i_category_id as BIGINT) AS category_id_1,
                CAST(t2.i_category_id as BIGINT) AS category_id_2
            FROM distinct_table t1
            INNER JOIN distinct_table t2
            ON t1.ws_order_number = t2.ws_order_number
            WHERE t1.i_category_id < t2.i_category_id
        )
        GROUP BY category_id_1, category_id_2
        ORDER BY cnt DESC, category_id_1, category_id_2
        LIMIT {q29_limit}
    """
    result = bc.sql(query)
    bc.drop_table("distinct_table")
    return result
if __name__ == "__main__":
    # Attach to the cluster with a BlazingContext and run Q29 via the harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q24/gpu_bdb_query_24_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q24_utils import read_tables
def main(data_dir, client, c, config):
    """Q24 (dask-sql): cross-price elasticity of demand for item 10000.

    ``temp_table`` computes, per competitor price record, the relative price
    change and the comparison-window length; the outer query sums web and
    store sales quantities in the windows before/after each price change and
    averages the relative quantity change over the relative price change.
    """
    benchmark(read_tables, config, c)
    query = """
        WITH temp_table as
        (
            SELECT
                i_item_sk,
                imp_sk,
                (imp_competitor_price - i_current_price) / i_current_price AS price_change,
                imp_start_date,
                (imp_end_date - imp_start_date) AS no_days_comp_price
            FROM item i ,item_marketprices imp
            WHERE i.i_item_sk = imp.imp_item_sk
            AND i.i_item_sk = 10000
            ORDER BY i_item_sk, imp_sk, imp_start_date
        )
        SELECT ws_item_sk,
            -- avg ( (current_ss_quant + current_ws_quant - prev_ss_quant - prev_ws_quant) / ((prev_ss_quant + prev_ws_quant) * ws.price_change) ) -- single node
            sum( (current_ss_quant+current_ws_quant-prev_ss_quant-prev_ws_quant) / (prev_ss_quant*ws.price_change+prev_ws_quant*ws.price_change) )
            / count( (current_ss_quant + current_ws_quant - prev_ss_quant - prev_ws_quant) / ((prev_ss_quant + prev_ws_quant) * ws.price_change) ) AS cross_price_elasticity
        FROM
        (
            SELECT
                ws_item_sk,
                imp_sk,
                price_change,
                SUM( CASE WHEN ( (ws_sold_date_sk >= c.imp_start_date) AND (ws_sold_date_sk < (c.imp_start_date + c.no_days_comp_price))) THEN ws_quantity ELSE 0 END ) AS current_ws_quant,
                SUM( CASE WHEN ( (ws_sold_date_sk >= (c.imp_start_date - c.no_days_comp_price)) AND (ws_sold_date_sk < c.imp_start_date)) THEN ws_quantity ELSE 0 END ) AS prev_ws_quant
            FROM web_sales ws
            JOIN temp_table c ON ws.ws_item_sk = c.i_item_sk
            GROUP BY ws_item_sk, imp_sk, price_change
        ) ws JOIN
        (
            SELECT
                ss_item_sk,
                imp_sk,
                price_change,
                SUM( CASE WHEN ((ss_sold_date_sk >= c.imp_start_date) AND (ss_sold_date_sk < (c.imp_start_date + c.no_days_comp_price))) THEN ss_quantity ELSE 0 END) AS current_ss_quant,
                SUM( CASE WHEN ((ss_sold_date_sk >= (c.imp_start_date - c.no_days_comp_price)) AND (ss_sold_date_sk < c.imp_start_date)) THEN ss_quantity ELSE 0 END) AS prev_ss_quant
            FROM store_sales ss
            JOIN temp_table c ON c.i_item_sk = ss.ss_item_sk
            GROUP BY ss_item_sk, imp_sk, price_change
        ) ss
        ON (ws.ws_item_sk = ss.ss_item_sk and ws.imp_sk = ss.imp_sk)
        GROUP BY ws.ws_item_sk
    """
    result = c.sql(query)
    return result
# NVTX range so this query shows up as "QUERY24" in profiler timelines.
@annotate("QUERY24", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, and run Q24."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q24/gpu_bdb_query_24_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register the q24 input parquet tables with the BlazingContext.

    Each table is created from the ``<table>/*.parquet`` files under
    ``data_dir``.
    """
    for table in ("web_sales", "item", "item_marketprices", "store_sales"):
        bc.create_table(table, os.path.join(data_dir, table + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Q24 (BlazingSQL): cross-price elasticity of demand for item 10000.

    Same query shape as the dask-sql variant: ``temp_table`` computes the
    relative price change and comparison-window length per competitor price
    record, and the outer query averages the relative quantity change over
    the relative price change for web + store sales.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = """
        WITH temp_table as
        (
            SELECT
                i_item_sk,
                imp_sk,
                (imp_competitor_price - i_current_price) / i_current_price AS price_change,
                imp_start_date,
                (imp_end_date - imp_start_date) AS no_days_comp_price
            FROM item i ,item_marketprices imp
            WHERE i.i_item_sk = imp.imp_item_sk
            AND i.i_item_sk = 10000
            ORDER BY i_item_sk, imp_sk, imp_start_date
        )
        SELECT ws_item_sk,
            -- avg ( (current_ss_quant + current_ws_quant - prev_ss_quant - prev_ws_quant) / ((prev_ss_quant + prev_ws_quant) * ws.price_change) ) -- single node
            sum( (current_ss_quant+current_ws_quant-prev_ss_quant-prev_ws_quant) / (prev_ss_quant*ws.price_change+prev_ws_quant*ws.price_change) )
            / count( (current_ss_quant + current_ws_quant - prev_ss_quant - prev_ws_quant) / ((prev_ss_quant + prev_ws_quant) * ws.price_change) ) AS cross_price_elasticity
        FROM
        (
            SELECT
                ws_item_sk,
                imp_sk,
                price_change,
                SUM( CASE WHEN ( (ws_sold_date_sk >= c.imp_start_date) AND (ws_sold_date_sk < (c.imp_start_date + c.no_days_comp_price))) THEN ws_quantity ELSE 0 END ) AS current_ws_quant,
                SUM( CASE WHEN ( (ws_sold_date_sk >= (c.imp_start_date - c.no_days_comp_price)) AND (ws_sold_date_sk < c.imp_start_date)) THEN ws_quantity ELSE 0 END ) AS prev_ws_quant
            FROM web_sales ws
            JOIN temp_table c ON ws.ws_item_sk = c.i_item_sk
            GROUP BY ws_item_sk, imp_sk, price_change
        ) ws JOIN
        (
            SELECT
                ss_item_sk,
                imp_sk,
                price_change,
                SUM( CASE WHEN ((ss_sold_date_sk >= c.imp_start_date) AND (ss_sold_date_sk < (c.imp_start_date + c.no_days_comp_price))) THEN ss_quantity ELSE 0 END) AS current_ss_quant,
                SUM( CASE WHEN ((ss_sold_date_sk >= (c.imp_start_date - c.no_days_comp_price)) AND (ss_sold_date_sk < c.imp_start_date)) THEN ss_quantity ELSE 0 END) AS prev_ss_quant
            FROM store_sales ss
            JOIN temp_table c ON c.i_item_sk = ss.ss_item_sk
            GROUP BY ss_item_sk, imp_sk, price_change
        ) ss
        ON (ws.ws_item_sk = ss.ss_item_sk and ws.imp_sk = ss.imp_sk)
        GROUP BY ws.ws_item_sk
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
    # Attach to the cluster with a BlazingContext and run Q24 via the harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q24/gpu_bdb_query_24.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q24_utils import read_tables
from distributed import wait
### Current Implimenation Assumption
### Grouped Store sales and web sales of 1 item grouped by `date_sk` should fit in memory as number of dates is limited
## query parameter
q24_i_item_sk = 10000
def get_helper_query_table(imp_df, item_df):
    """Build the small helper table for item ``q24_i_item_sk``.

    Joins the single item row with its competitor market-price records and
    derives the relative price change and the comparison-window length
    (``no_days_comp_price``) per record. The result has only a handful of
    rows, so a single partition is used throughout.
    """
    marketprices = (
        imp_df.query(f"imp_item_sk == {q24_i_item_sk}", meta=imp_df._meta)
        .repartition(npartitions=1)
        .persist()
    )
    items = (
        item_df.query(f"i_item_sk == {q24_i_item_sk}", meta=item_df._meta)
        .repartition(npartitions=1)
        .persist()
    )
    joined = items.merge(marketprices, left_on="i_item_sk", right_on="imp_item_sk")
    # relative price change of the competitor vs. our current price
    joined["price_change"] = (
        joined["imp_competitor_price"] - joined["i_current_price"]
    ) / joined["i_current_price"]
    # length of the comparison window in days
    joined["no_days_comp_price"] = (
        joined["imp_end_date"] - joined["imp_start_date"]
    )
    joined = joined[
        ["i_item_sk", "imp_sk", "imp_start_date", "price_change", "no_days_comp_price"]
    ]
    return joined.sort_values(by=["i_item_sk", "imp_sk", "imp_start_date"])
def get_prev_current_ws(df, websales_col="ws_sum"):
    """Assign previous/current web-sales quantities for the merged df.

    Adds ``current_ws_quant`` (value of *websales_col* when the sale date
    falls in [imp_start_date, imp_start_date + no_days_comp_price), else 0)
    and ``prev_ws_quant`` (same for the window of equal length immediately
    before imp_start_date). Mutates and returns *df*.
    """
    curr_ws_f = (df["ws_sold_date_sk"] >= df["imp_start_date"]) & (
        df["ws_sold_date_sk"] < (df["imp_start_date"] + df["no_days_comp_price"])
    )
    prev_ws_f = (
        df["ws_sold_date_sk"] >= (df["imp_start_date"] - df["no_days_comp_price"])
    ) & (df["ws_sold_date_sk"] < (df["imp_start_date"]))
    # Fix: the original used chained-indexing assignment
    # (df["col"][mask] = ...), which raises SettingWithCopyWarning and is
    # silently a no-op under pandas copy-on-write. `.where` sets the same
    # values (column value inside the window, 0 outside) reliably.
    df["current_ws_quant"] = df[websales_col].where(curr_ws_f, 0)
    df["prev_ws_quant"] = df[websales_col].where(prev_ws_f, 0)
    return df
def get_prev_current_ss(df, store_sales_col="ss_sum"):
    """Assign previous/current store-sales quantities for the merged df.

    Adds ``current_ss_quant`` (value of *store_sales_col* when the sale date
    falls in [imp_start_date, imp_start_date + no_days_comp_price), else 0)
    and ``prev_ss_quant`` (same for the window of equal length immediately
    before imp_start_date). Mutates and returns *df*.
    """
    curr_ss_f = (df["ss_sold_date_sk"] >= df["imp_start_date"]) & (
        df["ss_sold_date_sk"] < (df["imp_start_date"] + df["no_days_comp_price"])
    )
    prev_ss_f = (
        df["ss_sold_date_sk"] >= (df["imp_start_date"] - df["no_days_comp_price"])
    ) & (df["ss_sold_date_sk"] < (df["imp_start_date"]))
    # Fix: the original used chained-indexing assignment
    # (df["col"][mask] = ...), which raises SettingWithCopyWarning and is
    # silently a no-op under pandas copy-on-write. `.where` sets the same
    # values (column value inside the window, 0 outside) reliably.
    df["current_ss_quant"] = df[store_sales_col].where(curr_ss_f, 0)
    df["prev_ss_quant"] = df[store_sales_col].where(prev_ss_f, 0)
    return df
def get_ws(ws_df, item_imp_join_df):
    """Sum web-sales quantities for item ``q24_i_item_sk`` in the current and
    previous price-comparison windows, grouped by (item, imp_sk, price_change).
    """
    f_ws_df = ws_df.query(f"ws_item_sk == {q24_i_item_sk}", meta=ws_df._meta)
    ## we know that number of dates is limited and we only have 1 item,
    ## so a single partition holds the grouped result comfortably
    f_ws_g_df = (
        f_ws_df.groupby(["ws_item_sk", "ws_sold_date_sk"])
        .agg({"ws_quantity": "sum"})
        .reset_index(drop=False)
        .repartition(npartitions=1)
        .persist()
    )
    f_ws_g_df = f_ws_g_df.rename(columns={"ws_quantity": "ws_sum"})
    # broadcast-style join against the tiny helper table
    f_ws_item_imp_join_df = f_ws_g_df.merge(
        item_imp_join_df, left_on="ws_item_sk", right_on="i_item_sk", how="inner"
    )
    # per-partition window tagging (single partition after repartition above)
    r_ws = f_ws_item_imp_join_df.map_partitions(get_prev_current_ws)
    r_ws = (
        r_ws.groupby(["ws_item_sk", "imp_sk", "price_change"])
        .agg({"current_ws_quant": "sum", "prev_ws_quant": "sum"})
        .reset_index(drop=False)
    )
    return r_ws
def get_ss(ss_df, item_imp_join_df):
    """Sum store-sales quantities for item ``q24_i_item_sk`` in the current and
    previous price-comparison windows, grouped by (item, imp_sk).
    """
    f_ss_df = ss_df.query(
        f"ss_item_sk == {q24_i_item_sk}", meta=ss_df._meta
    ).reset_index(drop=True)
    f_ss_g_df = (
        f_ss_df.groupby(["ss_item_sk", "ss_sold_date_sk"])
        .agg({"ss_quantity": "sum"})
        .reset_index(drop=False)
        .repartition(npartitions=1)
        .persist()
    )
    ### we know that the the number of dates is limited so below should always fit in memory
    f_ss_g_df = f_ss_g_df.rename(columns={"ss_quantity": "ss_sum"})
    # f_ws_df_grouped_df = f_ws_df.group_by('ws_item_sk')
    # broadcast-style join against the tiny helper table
    f_ss_item_imp_join_df = f_ss_g_df.merge(
        item_imp_join_df, left_on="ss_item_sk", right_on="i_item_sk", how="inner"
    )
    # per-partition window tagging (single partition after repartition above)
    r_ss = f_ss_item_imp_join_df.map_partitions(get_prev_current_ss)
    r_ss = (
        r_ss.groupby(["ss_item_sk", "imp_sk"])
        .agg({"current_ss_quant": "sum", "prev_ss_quant": "sum"})
        .reset_index(drop=False)
    )
    cols_2_keep = ["current_ss_quant", "prev_ss_quant", "ss_item_sk", "imp_sk"]
    r_ss = r_ss[cols_2_keep]
    return r_ss
def main(client, config):
    """Q24 (dask_cudf): cross-price elasticity of demand for a single item.

    Joins per-window web and store sales aggregates, computes the relative
    quantity change over the relative price change per price record, and
    averages it per item.
    """
    ws_df, item_df, imp_df, ss_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ## helper table: price change + window length per competitor price record
    item_imp_join_df = get_helper_query_table(imp_df, item_df)
    r_ss = get_ss(ss_df, item_imp_join_df)
    r_ws = get_ws(ws_df, item_imp_join_df)
    result_df = r_ws.merge(
        r_ss,
        left_on=["ws_item_sk", "imp_sk"],
        right_on=["ss_item_sk", "imp_sk"],
        how="inner",
        suffixes=("ws", "ss"),
    )
    # numerator: total quantity change (store + web, current - previous)
    result_df["cross_price_elasticity"] = (
        result_df["current_ss_quant"]
        + result_df["current_ws_quant"]
        - result_df["prev_ss_quant"]
        - result_df["prev_ws_quant"]
    )
    # denominator: previous total quantity scaled by the relative price change
    result_df["cross_price_elasticity"] = result_df["cross_price_elasticity"] / (
        (result_df["prev_ss_quant"] + result_df["prev_ws_quant"])
        * result_df["price_change"]
    )
    final_cols_2_keep = ["ws_item_sk", "cross_price_elasticity"]
    result_df = result_df[final_cols_2_keep]
    result_df = result_df.groupby(["ws_item_sk"]).agg(
        {"cross_price_elasticity": "mean"}
    )
    result_df = result_df.reset_index(drop=False)
    wait(result_df)
    return result_df
if __name__ == "__main__":
    # Attach to an existing Dask cluster and run this query through the
    # shared gpu-bdb benchmarking harness.
    from bdb_tools.cluster_startup import attach_to_cluster
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q07/gpu_bdb_query_07.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q07_utils import read_tables
q07_HIGHER_PRICE_RATIO = 1.2
# --store_sales date
q07_YEAR = 2004
q07_MONTH = 7
q07_HAVING_COUNT_GE = 10
q07_LIMIT = 10
def create_high_price_items_df(item_df):
    """Return items priced above ``q07_HIGHER_PRICE_RATIO`` times the mean
    price of their category.

    The result carries all original item columns plus ``avg_price`` (the
    per-category mean of ``i_current_price``).
    """
    # per-category average price
    category_avg = (
        item_df[["i_category", "i_current_price"]]
        .groupby(["i_category"])
        .agg({"i_current_price": "mean"})
        .rename(columns={"i_current_price": "avg_price"})
        .reset_index(drop=False)
    )
    merged = item_df.merge(category_avg)
    above_threshold = (
        merged["i_current_price"] > merged["avg_price"] * q07_HIGHER_PRICE_RATIO
    )
    return merged[above_threshold].reset_index(drop=True)
def main(client, config):
    """Q07 (dask_cudf): states with >= 10 customers who bought high-priced
    items in July 2004, top 10 by count.
    """
    (
        item_df,
        store_sales_df,
        date_dim_df,
        customer_df,
        customer_address_df,
    ) = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    high_price_items_df = create_high_price_items_df(item_df)
    del item_df
    ### Query 0. Date/time filtration logic
    filtered_date_df = date_dim_df.query(
        f"d_year == {q07_YEAR} and d_moy == {q07_MONTH}", meta=date_dim_df._meta
    ).reset_index(drop=True)
    ### filtering store sales to above dates
    store_sales_df = store_sales_df.merge(
        filtered_date_df,
        left_on=["ss_sold_date_sk"],
        right_on=["d_date_sk"],
        how="inner",
    )
    ### cols 2 keep after merge
    store_sales_cols = ["ss_item_sk", "ss_customer_sk", "ss_sold_date_sk"]
    store_sales_df = store_sales_df[store_sales_cols]
    #### Query 1. `store_sales` join `highPriceItems`
    store_sales_highPriceItems_join_df = store_sales_df.merge(
        high_price_items_df, left_on=["ss_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    #### Query 2. `Customer` Merge `store_sales_highPriceItems_join_df`
    store_sales_highPriceItems_customer_join_df = store_sales_highPriceItems_join_df.merge(
        customer_df, left_on=["ss_customer_sk"], right_on=["c_customer_sk"], how="inner"
    )
    #### Query 3. `store_sales_highPriceItems_customer_join_df` Merge `Customer Address`
    customer_address_df = customer_address_df[customer_address_df["ca_state"].notnull()]
    final_merged_df = store_sales_highPriceItems_customer_join_df.merge(
        customer_address_df, left_on=["c_current_addr_sk"], right_on=["ca_address_sk"]
    )
    #### Query 4. Final state-grouped query
    count_df = final_merged_df["ca_state"].value_counts()
    ### number of states is limited=50
    ### so it can remain a cudf frame
    count_df = count_df.compute()
    count_df = count_df[count_df >= q07_HAVING_COUNT_GE]
    count_df = count_df.sort_values(ascending=False)
    result_df = count_df.head(q07_LIMIT)
    result_df = result_df.reset_index(drop=False).rename(
        columns={"index": "ca_state", "ca_state": "cnt"}
    )
    return result_df
if __name__ == "__main__":
    # Attach to an existing Dask cluster and run this query through the
    # shared gpu-bdb benchmarking harness.
    from bdb_tools.cluster_startup import attach_to_cluster
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q07/gpu_bdb_query_07_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q07_utils import read_tables
def main(data_dir, client, c, config):
    """Q07 (dask-sql): states with >= 10 customers who bought items priced
    more than 20% above the category average in July 2004, top 10 by count.
    """
    benchmark(read_tables, config, c)
    # temp_table: items priced > 1.2x their category's average price.
    query = """
        WITH temp_table as
        (
            SELECT k.i_item_sk
            FROM item k,
            (
                SELECT i_category,
                    SUM(j.i_current_price) / COUNT(j.i_current_price) * 1.2 AS avg_price
                FROM item j
                GROUP BY j.i_category
            ) avgCategoryPrice
            WHERE avgCategoryPrice.i_category = k.i_category
            AND k.i_current_price > avgCategoryPrice.avg_price
        )
        SELECT ca_state, COUNT(*) AS cnt
        FROM
            customer_address a,
            customer c,
            store_sales s,
            temp_table highPriceItems
        WHERE a.ca_address_sk = c.c_current_addr_sk
        AND c.c_customer_sk = s.ss_customer_sk
        AND ca_state IS NOT NULL
        AND ss_item_sk = highPriceItems.i_item_sk
        AND s.ss_sold_date_sk IN
        (
            SELECT d_date_sk
            FROM date_dim
            WHERE d_year = 2004
            AND d_moy = 7
        )
        GROUP BY ca_state
        HAVING COUNT(*) >= 10
        ORDER BY cnt DESC, ca_state
        LIMIT 10
    """
    result = c.sql(query)
    return result
# NVTX range so this query shows up as "QUERY7" in profiler timelines.
@annotate("QUERY7", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, and run Q07."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q07/gpu_bdb_query_07_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register the q07 input parquet tables with the BlazingContext.

    Each table is created from the ``<table>/*.parquet`` files under
    ``data_dir``.
    """
    tables = ("item", "customer", "store_sales", "date_dim", "customer_address")
    for table in tables:
        bc.create_table(table, os.path.join(data_dir, table + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Q07 (BlazingSQL): states with >= 10 customers who bought items priced
    more than 20% above the category average in July 2004, top 10 by count.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # temp_table: items priced > 1.2x their category's average price.
    query = """
        WITH temp_table as
        (
            SELECT k.i_item_sk
            FROM item k,
            (
                SELECT i_category,
                    SUM(j.i_current_price) / COUNT(j.i_current_price) * 1.2 AS avg_price
                FROM item j
                GROUP BY j.i_category
            ) avgCategoryPrice
            WHERE avgCategoryPrice.i_category = k.i_category
            AND k.i_current_price > avgCategoryPrice.avg_price
        )
        SELECT ca_state, COUNT(*) AS cnt
        FROM
            customer_address a,
            customer c,
            store_sales s,
            temp_table highPriceItems
        WHERE a.ca_address_sk = c.c_current_addr_sk
        AND c.c_customer_sk = s.ss_customer_sk
        AND ca_state IS NOT NULL
        AND ss_item_sk = highPriceItems.i_item_sk
        AND s.ss_sold_date_sk IN
        (
            SELECT d_date_sk
            FROM date_dim
            WHERE d_year = 2004
            AND d_moy = 7
        )
        GROUP BY ca_state
        HAVING COUNT(*) >= 10
        ORDER BY cnt DESC, ca_state
        LIMIT 10
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
    # Attach to the cluster with a BlazingContext and run Q07 via the harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q04/gpu_bdb_query_04_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import dask_cudf
import cudf
import pandas as pd
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q04_utils import (
reduction_function,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Q04 (dask-sql): average page count of abandoned shopping-cart sessions.

    Encodes ``wp_type`` as categorical codes, joins clickstreams (with no
    sale) against web pages, sessionizes per user, and returns
    sum(pagecount)/count(*) over abandoned-cart sessions as a 1-row frame.
    """
    benchmark(read_tables, config, c)
    query_web_page = """
        SELECT wp_type, wp_web_page_sk
        FROM web_page_wo_categorical
    """
    wp = c.sql(query_web_page)
    # Convert wp_type to categorical and get cat_id of review and dynamic type
    wp["wp_type"] = wp["wp_type"].map_partitions(
        lambda ser: ser.astype("category"))
    cpu_categories = wp["wp_type"].compute().cat.categories
    # cudf categories must be moved to host before get_loc below
    if isinstance(wp, dask_cudf.DataFrame):
        cpu_categories = cpu_categories.to_pandas()
    DYNAMIC_CAT_CODE = cpu_categories.get_loc("dynamic")
    ORDER_CAT_CODE = cpu_categories.get_loc("order")
    # ### cast to minimum viable dtype
    wp["wp_type_codes"] = wp["wp_type"].cat.codes
    cols_2_keep = ["wp_web_page_sk", "wp_type_codes"]
    wp = wp[cols_2_keep]
    wp = wp.persist()
    wait(wp)
    c.create_table('web_page', wp, persist=False)
    query = """
        SELECT
            c.wcs_user_sk,
            w.wp_type_codes,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec
        FROM web_clickstreams c, web_page w
        WHERE c.wcs_web_page_sk = w.wp_web_page_sk
        AND c.wcs_web_page_sk IS NOT NULL
        AND c.wcs_user_sk IS NOT NULL
        AND c.wcs_sales_sk IS NULL --abandoned implies: no sale
        DISTRIBUTE BY wcs_user_sk
    """
    merged_df = c.sql(query)
    keep_cols = ["wcs_user_sk", "wp_type_codes", "tstamp_inSec"]
    # per-partition sessionization + abandoned-cart reduction
    result_df = merged_df.map_partitions(
        reduction_function, keep_cols, DYNAMIC_CAT_CODE, ORDER_CAT_CODE
    )
    result = result_df["pagecount"].sum() / result_df["count"].sum()
    # Persist before computing to ensure scalar transfer only on compute
    result = result.persist()
    result = result.compute()
    # wrap the scalar in a 1-row frame matching the backend (GPU or CPU)
    if isinstance(wp, dask_cudf.DataFrame):
        result_df = cudf.DataFrame({"sum(pagecount)/count(*)": [result]})
    else:
        result_df = pd.DataFrame({"sum(pagecount)/count(*)": [result]})
    c.drop_table("web_page")
    return result_df
# NVTX range so this query shows up as "QUERY4" in profiler timelines.
@annotate("QUERY4", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, and run Q04."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q04/gpu_bdb_query_04_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.sessionization import get_sessions
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
def abandonedShoppingCarts(df, DYNAMIC_CAT_CODE, ORDER_CAT_CODE):
    """Reduce a sessionized partition to abandoned-cart totals.

    A session is "abandoned" when its last "order"/"dynamic" event (by
    timestamp) is a "dynamic" page. Returns a 1-row cudf DataFrame with
    ``pagecount`` (total events in abandoned sessions) and ``count``
    (number of abandoned sessions).
    """
    import cudf
    # work around for https://github.com/rapidsai/cudf/issues/5470
    df.reset_index(drop=True, inplace=True)
    # Select groups where last dynamic row comes after last order row
    filtered_df = df[
        (df["wp_type_codes"] == ORDER_CAT_CODE)
        | (df["wp_type_codes"] == DYNAMIC_CAT_CODE)
    ]
    # work around for https://github.com/rapidsai/cudf/issues/5470
    filtered_df.reset_index(drop=True, inplace=True)
    # Create a new column that is the concatenation of timestamp and wp_type_codes
    # (eg:123456:3, 234567:5); max() over these strings picks the latest event
    filtered_df["wp_type_codes"] = (
        filtered_df["tstamp_inSec"]
        .astype("str")
        .str.cat(filtered_df["wp_type_codes"].astype("str"), sep=":")
    )
    # This gives the last occurrence (by timestamp) within the "order", "dynamic" wp_types
    filtered_df = filtered_df.groupby(
        ["wcs_user_sk", "session_id"], as_index=False, sort=False
    ).agg({"wp_type_codes": "max"})
    # If the max contains dynamic, keep the row else discard.
    last_dynamic_df = filtered_df[
        filtered_df["wp_type_codes"].str.contains(
            ":" + str(DYNAMIC_CAT_CODE), regex=False
        )
    ]
    del filtered_df
    # Find counts for each group
    grouped_count_df = df.groupby(
        ["wcs_user_sk", "session_id"], as_index=False, sort=False
    ).agg({"tstamp_inSec": "count"})
    # Merge counts with the "dynamic" shopping cart groups
    result = last_dynamic_df.merge(
        grouped_count_df, on=["wcs_user_sk", "session_id"], how="inner"
    )
    del (last_dynamic_df, grouped_count_df)
    return cudf.DataFrame(
        {"pagecount": result.tstamp_inSec.sum(), "count": len(result)}
    )
def reduction_function(df, keep_cols, DYNAMIC_CAT_CODE, ORDER_CAT_CODE):
    """Sessionize one clickstream partition, then reduce it to the
    single-row abandoned-cart summary produced by abandonedShoppingCarts."""
    sessionized = get_sessions(df, keep_cols=keep_cols)
    return abandonedShoppingCarts(
        sessionized,
        DYNAMIC_CAT_CODE=DYNAMIC_CAT_CODE,
        ORDER_CAT_CODE=ORDER_CAT_CODE,
    )
def read_tables(data_dir, bc):
    """Register the q04 input tables (parquet globs) on the SQL context."""
    table_globs = {
        "web_page_wo_categorical": "web_page/*.parquet",
        "web_clickstreams": "web_clickstreams/*.parquet",
    }
    for table_name, glob in table_globs.items():
        bc.create_table(table_name, os.path.join(data_dir, glob))
def main(data_dir, client, bc, config):
    """q04 (BlazingSQL variant): average pages visited in abandoned carts.

    Builds category codes for ``wp_type`` on the GPU, joins clickstreams to
    web pages in SQL, then sessionizes per partition and returns
    sum(pagecount)/count(*) as a single-row cudf.DataFrame.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query_web_page = """
        SELECT wp_type, wp_web_page_sk
        FROM web_page_wo_categorical
    """
    wp = bc.sql(query_web_page)
    # Convert wp_type to categorical and get cat_id of review and dynamic type
    wp["wp_type"] = wp["wp_type"].map_partitions(
        lambda ser: ser.astype("category"))
    cpu_categories = wp["wp_type"].compute().cat.categories.to_pandas()
    DYNAMIC_CAT_CODE = cpu_categories.get_loc("dynamic")
    ORDER_CAT_CODE = cpu_categories.get_loc("order")
    # ### cast to minimum viable dtype
    import cudf
    codes_min_signed_type = cudf.utils.dtypes.min_signed_type(
        len(cpu_categories))
    wp["wp_type_codes"] = wp["wp_type"].cat.codes.astype(codes_min_signed_type)
    # NOTE(review): the next assignment overwrites "wp_type" with the same
    # codes but is dead — only "wp_type_codes" survives the column selection
    # below.  Kept for byte-compatibility; candidate for removal.
    wp["wp_type"] = wp["wp_type"].cat.codes.astype(codes_min_signed_type)
    cols_2_keep = ["wp_web_page_sk", "wp_type_codes"]
    wp = wp[cols_2_keep]
    # Materialize the small dimension table before re-registering it for SQL.
    wp = wp.persist()
    wait(wp)
    bc.create_table('web_page', wp)
    query = """
        SELECT
            c.wcs_user_sk,
            w.wp_type_codes,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec
        FROM web_clickstreams c, web_page w
        WHERE c.wcs_web_page_sk = w.wp_web_page_sk
        AND c.wcs_web_page_sk IS NOT NULL
        AND c.wcs_user_sk IS NOT NULL
        AND c.wcs_sales_sk IS NULL --abandoned implies: no sale
        ORDER BY wcs_user_sk, tstamp_inSec
    """
    merged_df = bc.sql(query)
    keep_cols = ["wcs_user_sk", "wp_type_codes", "tstamp_inSec"]
    # Each partition is reduced to a single (pagecount, count) row.
    result_df = merged_df.map_partitions(
        reduction_function, keep_cols, DYNAMIC_CAT_CODE, ORDER_CAT_CODE
    )
    result = result_df["pagecount"].sum() / result_df["count"].sum()
    # Persist before computing to ensure scalar transfer only on compute
    result = result.persist()
    result = result.compute()
    result_df = cudf.DataFrame({"sum(pagecount)/count(*)": [result]})
    bc.drop_table("web_page")
    return result_df
if __name__ == "__main__":
    # Parse benchmark configuration, attach to the Dask cluster with a
    # BlazingSQL context, and run the query through the shared harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q04/gpu_bdb_query_04.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q04_utils import (
reduction_function,
read_tables
)
def main(client, config):
    """q04 (pure dask_cudf variant): average pages visited in abandoned carts.

    Same computation as the SQL variant, expressed with dataframe ops:
    filter clickstreams with no sale, join to web pages, sessionize per
    partition, and return sum(pagecount)/count(*) as a cudf.DataFrame.
    """
    wp, wcs_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ### downcasting the column inline with q03
    wcs_df["wcs_user_sk"] = wcs_df["wcs_user_sk"].astype("int32")
    # Keep rows with a page and a user but no sale (abandoned implies no sale).
    f_wcs_df = wcs_df[
        wcs_df["wcs_web_page_sk"].notnull()
        & wcs_df["wcs_user_sk"].notnull()
        & wcs_df["wcs_sales_sk"].isnull()
    ].reset_index(drop=True)
    # Combine the date and time keys into one second-resolution timestamp.
    f_wcs_df["tstamp_inSec"] = (
        f_wcs_df["wcs_click_date_sk"] * 24 * 60 * 60 + f_wcs_df["wcs_click_time_sk"]
    )
    keep_cols = ["wcs_user_sk", "tstamp_inSec", "wcs_web_page_sk"]
    f_wcs_df = f_wcs_df[keep_cols]
    # Co-locate each user's clicks so sessionization sees whole users.
    f_wcs_df = f_wcs_df.shuffle(on=["wcs_user_sk"])
    # Convert wp_type to categorical and get cat_id of review and dynamic type
    wp["wp_type"] = wp["wp_type"].map_partitions(lambda ser: ser.astype("category"))
    cpu_categories = wp["wp_type"].compute().cat.categories.to_pandas()
    DYNAMIC_CAT_CODE = cpu_categories.get_loc("dynamic")
    ORDER_CAT_CODE = cpu_categories.get_loc("order")
    # ### cast to minimum viable dtype
    codes_min_signed_type = cudf.utils.dtypes.min_signed_type(len(cpu_categories))
    wp["wp_type_codes"] = wp["wp_type"].cat.codes.astype(codes_min_signed_type)
    cols_2_keep = ["wp_web_page_sk", "wp_type_codes"]
    wp = wp[cols_2_keep]
    # Continue remaining workflow with wp_type as category codes
    merged_df = f_wcs_df.merge(
        wp, left_on="wcs_web_page_sk", right_on="wp_web_page_sk", how="inner"
    )
    merged_df = merged_df[["wcs_user_sk", "tstamp_inSec", "wp_type_codes"]]
    keep_cols = ["wcs_user_sk", "wp_type_codes", "tstamp_inSec"]
    # Each partition is reduced to a single (pagecount, count) row.
    result_df = merged_df.map_partitions(
        reduction_function, keep_cols, DYNAMIC_CAT_CODE, ORDER_CAT_CODE
    )
    result = result_df["pagecount"].sum() / result_df["count"].sum()
    # Persist before computing to ensure scalar transfer only on compute
    result = result.persist()
    result = result.compute()
    result_df = cudf.DataFrame({"sum(pagecount)/count(*)": [result]})
    return result_df
if __name__ == "__main__":
    # Deferred import keeps cluster startup out of library import paths.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q09/gpu_bdb_query_09_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q09_utils import (
q09_year,
q09_part1_ca_country,
q09_part1_ca_state_IN,
q09_part1_net_profit_min,
q09_part1_net_profit_max,
q09_part1_education_status,
q09_part1_marital_status,
q09_part1_sales_price_min,
q09_part1_sales_price_max,
q09_part2_ca_country,
q09_part2_ca_state_IN,
q09_part2_net_profit_min,
q09_part2_net_profit_max,
q09_part2_education_status,
q09_part2_marital_status,
q09_part2_sales_price_min,
q09_part2_sales_price_max,
q09_part3_ca_country,
q09_part3_ca_state_IN,
q09_part3_net_profit_min,
q09_part3_net_profit_max,
q09_part3_education_status,
q09_part3_marital_status,
q09_part3_sales_price_min,
q09_part3_sales_price_max,
read_tables
)
def main(data_dir, client, c, config):
    """q09 (dask-sql variant): total store-sales quantity for customers that
    match one of three (demographics, sales-price) groups AND one of three
    (address, net-profit) groups, for a given year.

    All parameters are interpolated from bdb_tools.q09_utils; note the
    ``IN {...}`` fragments expect the state constants to include their own
    parentheses in this variant.
    """
    benchmark(read_tables, config, c)

    query = f"""
        SELECT SUM(ss1.ss_quantity)
        FROM store_sales ss1,
            date_dim dd,customer_address ca1,
            store s,
            customer_demographics cd
        -- select date range
        WHERE ss1.ss_sold_date_sk = dd.d_date_sk
        AND dd.d_year = {q09_year}
        AND ss1.ss_addr_sk = ca1.ca_address_sk
        AND s.s_store_sk = ss1.ss_store_sk
        AND cd.cd_demo_sk = ss1.ss_cdemo_sk
        AND
        (
            (
                cd.cd_marital_status = '{q09_part1_marital_status}'
                AND cd.cd_education_status = '{q09_part1_education_status}'
                AND {q09_part1_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part1_sales_price_max}
            )
            OR
            (
                cd.cd_marital_status = '{q09_part2_marital_status}'
                AND cd.cd_education_status = '{q09_part2_education_status}'
                AND {q09_part2_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part2_sales_price_max}
            )
            OR
            (
                cd.cd_marital_status = '{q09_part3_marital_status}'
                AND cd.cd_education_status = '{q09_part3_education_status}'
                AND {q09_part3_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part3_sales_price_max}
            )
        )
        AND
        (
            (
                ca1.ca_country = '{q09_part1_ca_country}'
                AND ca1.ca_state IN {q09_part1_ca_state_IN}
                AND {q09_part1_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part1_net_profit_max}
            )
            OR
            (
                ca1.ca_country = '{q09_part2_ca_country}'
                AND ca1.ca_state IN {q09_part2_ca_state_IN}
                AND {q09_part2_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part2_net_profit_max}
            )
            OR
            (
                ca1.ca_country = '{q09_part3_ca_country}'
                AND ca1.ca_state IN {q09_part3_ca_state_IN}
                AND {q09_part3_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part3_net_profit_max}
            )
        )
    """
    result = c.sql(query)
    # Normalize the aggregate's column name for result validation.
    result.columns = ["sum(ss_quantity)"]
    return result
@annotate("QUERY9", color="green", domain="gpu-bdb")  # NVTX range for profiling
def start_run():
    """Attach to the cluster with a dask-sql context and run q09."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)

if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q09/gpu_bdb_query_09_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# -------- Q9 -----------
# Query substitution parameters.  Three (marital status, education,
# sales-price range) groups are OR-ed together, and independently three
# (country, states, net-profit range) groups — both must match.
# The *_ca_state_IN strings are pre-quoted SQL lists spliced into IN (...).
q09_year = 2001

q09_part1_ca_country = "United States"
q09_part1_ca_state_IN = "'KY', 'GA', 'NM'"
q09_part1_net_profit_min = 0
q09_part1_net_profit_max = 2000
q09_part1_education_status = "4 yr Degree"
q09_part1_marital_status = "M"
q09_part1_sales_price_min = 100
q09_part1_sales_price_max = 150

q09_part2_ca_country = "United States"
q09_part2_ca_state_IN = "'MT', 'OR', 'IN'"
q09_part2_net_profit_min = 150
q09_part2_net_profit_max = 3000
q09_part2_education_status = "4 yr Degree"
q09_part2_marital_status = "M"
q09_part2_sales_price_min = 50
q09_part2_sales_price_max = 200

q09_part3_ca_country = "United States"
q09_part3_ca_state_IN = "'WI', 'MO', 'WV'"
q09_part3_net_profit_min = 50
q09_part3_net_profit_max = 25000
q09_part3_education_status = "4 yr Degree"
q09_part3_marital_status = "M"
q09_part3_sales_price_min = 150
q09_part3_sales_price_max = 200
def read_tables(data_dir, bc):
    """Register the five q09 input tables (parquet globs) on the SQL context."""
    table_names = (
        "store_sales",
        "customer_address",
        "customer_demographics",
        "date_dim",
        "store",
    )
    for name in table_names:
        bc.create_table(name, os.path.join(data_dir, name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """q09 (BlazingSQL variant): total store-sales quantity for customers
    matching one of three demographic/price groups AND one of three
    address/profit groups in the configured year.

    Unlike the dask-sql variant, the state lists here are bare quoted
    strings, so the query wraps them in explicit parentheses for IN (...).
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    query = f"""
        SELECT SUM(ss1.ss_quantity)
        FROM store_sales ss1,
            date_dim dd,customer_address ca1,
            store s,
            customer_demographics cd
        -- select date range
        WHERE ss1.ss_sold_date_sk = dd.d_date_sk
        AND dd.d_year = {q09_year}
        AND ss1.ss_addr_sk = ca1.ca_address_sk
        AND s.s_store_sk = ss1.ss_store_sk
        AND cd.cd_demo_sk = ss1.ss_cdemo_sk
        AND
        (
            (
                cd.cd_marital_status = '{q09_part1_marital_status}'
                AND cd.cd_education_status = '{q09_part1_education_status}'
                AND {q09_part1_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part1_sales_price_max}
            )
            OR
            (
                cd.cd_marital_status = '{q09_part2_marital_status}'
                AND cd.cd_education_status = '{q09_part2_education_status}'
                AND {q09_part2_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part2_sales_price_max}
            )
            OR
            (
                cd.cd_marital_status = '{q09_part3_marital_status}'
                AND cd.cd_education_status = '{q09_part3_education_status}'
                AND {q09_part3_sales_price_min} <= ss1.ss_sales_price
                AND ss1.ss_sales_price <= {q09_part3_sales_price_max}
            )
        )
        AND
        (
            (
                ca1.ca_country = '{q09_part1_ca_country}'
                AND ca1.ca_state IN ({q09_part1_ca_state_IN})
                AND {q09_part1_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part1_net_profit_max}
            )
            OR
            (
                ca1.ca_country = '{q09_part2_ca_country}'
                AND ca1.ca_state IN ({q09_part2_ca_state_IN})
                AND {q09_part2_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part2_net_profit_max}
            )
            OR
            (
                ca1.ca_country = '{q09_part3_ca_country}'
                AND ca1.ca_state IN ({q09_part3_ca_state_IN})
                AND {q09_part3_net_profit_min} <= ss1.ss_net_profit
                AND ss1.ss_net_profit <= {q09_part3_net_profit_max}
            )
        )
    """
    result = bc.sql(query)
    # Normalize the aggregate's column name for result validation.
    result.columns = ["sum(ss_quantity)"]
    return result
if __name__ == "__main__":
    # Attach to the cluster with a BlazingSQL context and run via the harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q09/gpu_bdb_query_09.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q09_utils import (
q09_year,
q09_part1_ca_country,
q09_part1_ca_state_IN,
q09_part1_net_profit_min,
q09_part1_net_profit_max,
q09_part1_education_status,
q09_part1_marital_status,
q09_part1_sales_price_min,
q09_part1_sales_price_max,
q09_part2_ca_country,
q09_part2_ca_state_IN,
q09_part2_net_profit_min,
q09_part2_net_profit_max,
q09_part2_education_status,
q09_part2_marital_status,
q09_part2_sales_price_min,
q09_part2_sales_price_max,
q09_part3_ca_country,
q09_part3_ca_state_IN,
q09_part3_net_profit_min,
q09_part3_net_profit_max,
q09_part3_education_status,
q09_part3_marital_status,
q09_part3_sales_price_min,
q09_part3_sales_price_max,
read_tables
)
def main(client, config):
    """q09 (pure dask_cudf variant): total store-sales quantity.

    Expresses the q09 SQL as a chain of inner merges plus two OR-of-three
    boolean filters (demographics/price, then address/profit), dropping
    join keys as soon as they are consumed to limit memory.
    """
    (
        store_sales,
        customer_address,
        customer_demographics,
        date_dim,
        store,
    ) = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # Restrict the date dimension to the target year before joining.
    date_dim = date_dim.query(
        "d_year==@q09_year", meta=date_dim._meta, local_dict={"q09_year": q09_year}
    ).reset_index(drop=True)

    output_table = store_sales.merge(
        date_dim, left_on=["ss_sold_date_sk"], right_on=["d_date_sk"], how="inner"
    )
    output_table = output_table.drop(
        columns=["d_year", "d_date_sk", "ss_sold_date_sk"]
    )  # Drop the columns that are not needed

    # Join to store only to enforce the s_store_sk = ss_store_sk predicate.
    output_table = output_table.merge(
        store, left_on=["ss_store_sk"], right_on=["s_store_sk"], how="inner"
    )
    output_table = output_table.drop(columns=["ss_store_sk", "s_store_sk"])

    output_table = output_table.merge(
        customer_demographics,
        left_on=["ss_cdemo_sk"],
        right_on=["cd_demo_sk"],
        how="inner",
    )
    # First OR-of-three filter: marital status / education / sales-price band.
    output_table = output_table[
        (
            (output_table["cd_marital_status"] == q09_part1_marital_status)
            & (output_table["cd_education_status"] == q09_part1_education_status)
            & (output_table["ss_sales_price"] >= q09_part1_sales_price_min)
            & (output_table["ss_sales_price"] <= q09_part1_sales_price_max)
        )
        | (
            (output_table["cd_marital_status"] == q09_part2_marital_status)
            & (output_table["cd_education_status"] == q09_part2_education_status)
            & (output_table["ss_sales_price"] >= q09_part2_sales_price_min)
            & (output_table["ss_sales_price"] <= q09_part2_sales_price_max)
        )
        | (
            (output_table["cd_marital_status"] == q09_part3_marital_status)
            & (output_table["cd_education_status"] == q09_part3_education_status)
            & (output_table["ss_sales_price"] >= q09_part3_sales_price_min)
            & (output_table["ss_sales_price"] <= q09_part3_sales_price_max)
        )
    ].reset_index(drop=True)
    output_table = output_table.drop(
        columns=[
            "ss_cdemo_sk",
            "cd_demo_sk",
            "cd_marital_status",
            "cd_education_status",
            "ss_sales_price",
        ]
    )

    output_table = output_table.merge(
        customer_address,
        left_on=["ss_addr_sk"],
        right_on=["ca_address_sk"],
        how="inner",
    )
    # Second OR-of-three filter: country / state list / net-profit band.
    # NOTE(review): str.contains("|".join(states)) is a regex *substring*
    # match approximating SQL IN; it assumes no state code is a substring
    # of another — holds for 2-letter codes, but worth confirming.
    output_table = output_table[
        (
            (output_table["ca_country"] == q09_part1_ca_country)
            & (output_table["ca_state"].str.contains("|".join(q09_part1_ca_state_IN)))
            & (output_table["ss_net_profit"] >= q09_part1_net_profit_min)
            & (output_table["ss_net_profit"] <= q09_part1_net_profit_max)
        )
        | (
            (output_table["ca_country"] == q09_part2_ca_country)
            & (output_table["ca_state"].str.contains("|".join(q09_part2_ca_state_IN)))
            & (output_table["ss_net_profit"] >= q09_part2_net_profit_min)
            & (output_table["ss_net_profit"] <= q09_part2_net_profit_max)
        )
        | (
            (output_table["ca_country"] == q09_part3_ca_country)
            & (output_table["ca_state"].str.contains("|".join(q09_part3_ca_state_IN)))
            & (output_table["ss_net_profit"] >= q09_part3_net_profit_min)
            & (output_table["ss_net_profit"] <= q09_part3_net_profit_max)
        )
    ].reset_index(drop=True)
    output_table = output_table.drop(
        columns=[
            "ss_addr_sk",
            "ca_address_sk",
            "ca_country",
            "ca_state",
            "ss_net_profit",
        ]
    )

    ### this is a scaler so no need to transform
    result = output_table["ss_quantity"].sum().persist()
    result = result.compute()
    result_df = cudf.DataFrame({"sum(ss_quantity)": [result]})
    return result_df
if __name__ == "__main__":
    # Deferred import keeps cluster startup out of library import paths.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q20/gpu_bdb_query_20.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q20_utils import (
get_clusters,
read_tables
)
from dask.distributed import wait
def remove_inf_and_nulls(df, column_names, value=0.0):
    """
    Replace all nulls, inf, -inf with ``value`` in ``column_names`` of ``df``.

    Parameters
    ----------
    df : DataFrame (pandas/cudf-compatible)
        Frame to clean; mutated in place and also returned so the function
        can be used with ``map_partitions``.
    column_names : iterable of str
        Names of the columns to clean.
    value : float, default 0.0
        Replacement value for nulls and +/-infinity.

    Returns
    -------
    The same ``df`` with the listed columns cleaned.
    """
    to_replace_dict = dict.fromkeys(column_names, [np.inf, -np.inf])
    # BUGFIX: previously hard-coded 0.0 here, silently ignoring ``value``.
    value_dict = dict.fromkeys(column_names, value)

    # Fill nulls for ratio columns with ``value``
    df.fillna(value_dict, inplace=True)

    # Replace inf and -inf with ``value`` (per-column dict-to-dict replace)
    df.replace(to_replace_dict, value_dict, inplace=True)

    return df
def main(client, config):
    """q20 (pure dask_cudf variant): customer return-behavior clustering.

    Builds per-customer order/return counts, item counts and money totals,
    derives the return ratios used as k-means features, and hands the
    single-partition feature frame to ``get_clusters``.
    """
    store_sales_df, store_returns_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    n_workers = len(client.scheduler_info()["workers"])

    ### going via repartition for split_out drop duplicates
    # Two-stage distinct (per-partition dedupe, shuffle by customer, dedupe
    # again) to get unique (ticket, customer) pairs without a global sort.
    unique_sales = store_sales_df[
        ["ss_ticket_number", "ss_customer_sk"]
    ].map_partitions(lambda df: df.drop_duplicates())
    unique_sales = unique_sales.shuffle(on=["ss_customer_sk"])
    unique_sales = unique_sales.map_partitions(lambda df: df.drop_duplicates())
    unique_sales = unique_sales.persist()
    wait(unique_sales)

    # Distinct ticket count per customer == number of orders.
    orders_count = (
        unique_sales.groupby(by="ss_customer_sk")
        .agg({"ss_ticket_number": "count"})
        .reset_index()
    )
    orders_df = (
        store_sales_df.groupby(by="ss_customer_sk")
        .agg({"ss_item_sk": "count", "ss_net_paid": "sum"})
        .reset_index()
    )
    ### free up memory no longer needed
    del store_sales_df

    orders_df = orders_df.merge(orders_count, how="inner", on="ss_customer_sk")
    orders_df = orders_df.rename(
        columns={
            "ss_customer_sk": "user_sk",
            "ss_ticket_number": "orders_count",
            "ss_item_sk": "orders_items",
            "ss_net_paid": "orders_money",
        }
    )
    orders_df = orders_df.persist()
    wait(orders_df)
    del unique_sales

    # Same aggregates on the returns side (distinct handled by split_out).
    returns_count = (
        store_returns_df[["sr_ticket_number", "sr_customer_sk"]]
        .drop_duplicates(split_out=n_workers)
        .groupby(by="sr_customer_sk")
        .agg({"sr_ticket_number": "count"})
        .reset_index()
    )
    returns_df = (
        store_returns_df.groupby(by="sr_customer_sk")
        .agg({"sr_item_sk": "count", "sr_return_amt": "sum"})
        .reset_index()
    )
    ### free up memory no longer needed
    del store_returns_df

    returns_df = returns_df.merge(returns_count, how="inner", on="sr_customer_sk")
    returns_df = returns_df.rename(
        columns={
            "sr_customer_sk": "user_sk",
            "sr_ticket_number": "returns_count",
            "sr_item_sk": "returns_items",
            "sr_return_amt": "returns_money",
        }
    )
    returns_df = returns_df.persist()
    wait(returns_df)

    # Left join: customers with no returns get null ratios, cleaned below.
    final_df = orders_df.merge(returns_df, how="left", on="user_sk")
    final_df["orderRatio"] = (
        final_df["returns_count"] / final_df["orders_count"]
    ).round(7)
    final_df["itemsRatio"] = (
        final_df["returns_items"] / final_df["orders_items"]
    ).round(7)
    final_df["monetaryRatio"] = (
        final_df["returns_money"] / final_df["orders_money"]
    ).round(7)

    ratio_columns = ["orderRatio", "itemsRatio", "monetaryRatio"]
    final_df = final_df.map_partitions(
        remove_inf_and_nulls, column_names=ratio_columns, value=0.0
    )

    final_df = final_df.rename(columns={"returns_count": "frequency"})
    keep_cols = ["user_sk", "orderRatio", "itemsRatio", "monetaryRatio", "frequency"]
    final_df = final_df[keep_cols]
    final_df = final_df.fillna(0)

    # Collapse to one partition and sort so clustering input is deterministic.
    final_df = final_df.repartition(npartitions=1).persist()
    wait(final_df)
    final_df = final_df.sort_values(["user_sk"]).reset_index(drop=True)
    final_df = final_df.persist()
    wait(final_df)

    feature_cols = ["orderRatio", "itemsRatio", "monetaryRatio", "frequency"]
    results_dict = get_clusters(
        client=client, ml_input_df=final_df, feature_cols=feature_cols
    )
    return results_dict
if __name__ == "__main__":
    # Deferred import keeps cluster startup out of library import paths.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q20/gpu_bdb_query_20_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from dask.distributed import wait
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q20_utils import (
get_clusters,
read_tables
)
def main(data_dir, client, c, config):
    """q20 (dask-sql variant): customer return-behavior clustering.

    Computes per-customer return ratios in SQL (nulls coalesced to 0.0),
    then collapses to one sorted partition and runs k-means via
    ``get_clusters``.
    """
    benchmark(read_tables, config, c)

    query = """
        SELECT
            ss_customer_sk AS user_sk,
            round(CASE WHEN ((returns_count IS NULL) OR (orders_count IS NULL)
                OR ((returns_count / orders_count) IS NULL) ) THEN 0.0
                ELSE (returns_count / orders_count) END, 7) AS orderRatio,
            round(CASE WHEN ((returns_items IS NULL) OR (orders_items IS NULL)
                OR ((returns_items / orders_items) IS NULL) ) THEN 0.0
                ELSE (returns_items / orders_items) END, 7) AS itemsRatio,
            round(CASE WHEN ((returns_money IS NULL) OR (orders_money IS NULL)
                OR ((returns_money / orders_money) IS NULL) ) THEN 0.0
                ELSE (returns_money / orders_money) END, 7) AS monetaryRatio,
            round(CASE WHEN ( returns_count IS NULL) THEN 0.0
                ELSE returns_count END, 0) AS frequency
        FROM
        (
            SELECT
                ss_customer_sk,
                -- return order ratio
                CAST (COUNT(distinct(ss_ticket_number)) AS DOUBLE)
                    AS orders_count,
                -- return ss_item_sk ratio
                CAST (COUNT(ss_item_sk) AS DOUBLE) AS orders_items,
                -- return monetary amount ratio
                CAST(SUM( ss_net_paid ) AS DOUBLE) AS orders_money
            FROM store_sales s
            GROUP BY ss_customer_sk
        ) orders
        LEFT OUTER JOIN
        (
            SELECT
                sr_customer_sk,
                -- return order ratio
                CAST(count(distinct(sr_ticket_number)) AS DOUBLE)
                    AS returns_count,
                -- return ss_item_sk ratio
                CAST (COUNT(sr_item_sk) AS DOUBLE) AS returns_items,
                -- return monetary amount ratio
                CAST( SUM( sr_return_amt ) AS DOUBLE) AS returns_money
            FROM store_returns
            GROUP BY sr_customer_sk
        ) returned ON ss_customer_sk=sr_customer_sk
    """
    final_df = c.sql(query)
    final_df = final_df.fillna(0)

    # Collapse to one partition and sort so clustering input is deterministic.
    final_df = final_df.repartition(npartitions=1).persist()
    wait(final_df)
    final_df = final_df.sort_values(["user_sk"]).reset_index(drop=True)
    final_df = final_df.persist()
    wait(final_df)

    feature_cols = ["orderRatio", "itemsRatio", "monetaryRatio", "frequency"]
    results_dict = get_clusters(
        client=client, ml_input_df=final_df, feature_cols=feature_cols
    )
    return results_dict
@annotate("QUERY20", color="green", domain="gpu-bdb")  # NVTX range for profiling
def start_run():
    """Attach to the cluster with a dask-sql context and run q20."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)

if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q20/gpu_bdb_query_20_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from dask import delayed
from dask.distributed import wait
import numpy as np
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
train_clustering_model
)
# q20 parameters (forwarded to train_clustering_model in get_clusters)
N_CLUSTERS = 8  # number of k-means clusters
CLUSTER_ITERATIONS = 20  # presumably max iterations per k-means fit — confirm in bdb_tools
N_ITER = 5  # presumably number of k-means restarts — confirm in bdb_tools
def get_clusters(client, ml_input_df, feature_cols):
    """
    Takes the dask client, kmeans_input_df and feature columns.
    Returns a dictionary matching the output required for q20
    """
    import dask_cudf

    # One delayed clustering task per partition of the feature columns
    # (the caller collapses the frame to a single partition beforehand).
    ml_tasks = [
        delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
        for df in ml_input_df[feature_cols].to_delayed()
    ]
    results_dict = client.compute(*ml_tasks, sync=True)

    # Re-distribute the cluster labels to the input's partitioning and
    # attach them as a "label" column alongside each customer's user_sk.
    labels = results_dict["cid_labels"]
    labels_final = dask_cudf.from_cudf(labels, npartitions=ml_input_df.npartitions)
    ml_input_df["label"] = labels_final.reset_index()[0]

    # Replace the raw labels in the result dict with (user_sk, label) pairs.
    output = ml_input_df[["user_sk", "label"]]
    results_dict["cid_labels"] = output
    return results_dict
def read_tables(data_dir, bc):
    """Register the two q20 input tables (parquet globs) on the SQL context."""
    for name in ("store_sales", "store_returns"):
        bc.create_table(name, os.path.join(data_dir, name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """q20 (BlazingSQL variant): customer return-behavior clustering.

    Computes per-customer return ratios in SQL (nulls coalesced to 0.0),
    then collapses to one sorted partition and runs k-means via
    ``get_clusters``.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    query = """
        SELECT
            ss_customer_sk AS user_sk,
            round(CASE WHEN ((returns_count IS NULL) OR (orders_count IS NULL)
                OR ((returns_count / orders_count) IS NULL) ) THEN 0.0
                ELSE (returns_count / orders_count) END, 7) AS orderRatio,
            round(CASE WHEN ((returns_items IS NULL) OR (orders_items IS NULL)
                OR ((returns_items / orders_items) IS NULL) ) THEN 0.0
                ELSE (returns_items / orders_items) END, 7) AS itemsRatio,
            round(CASE WHEN ((returns_money IS NULL) OR (orders_money IS NULL)
                OR ((returns_money / orders_money) IS NULL) ) THEN 0.0
                ELSE (returns_money / orders_money) END, 7) AS monetaryRatio,
            round(CASE WHEN ( returns_count IS NULL) THEN 0.0
                ELSE returns_count END, 0) AS frequency
        FROM
        (
            SELECT
                ss_customer_sk,
                -- return order ratio
                CAST (COUNT(distinct(ss_ticket_number)) AS DOUBLE)
                    AS orders_count,
                -- return ss_item_sk ratio
                CAST (COUNT(ss_item_sk) AS DOUBLE) AS orders_items,
                -- return monetary amount ratio
                CAST(SUM( ss_net_paid ) AS DOUBLE) AS orders_money
            FROM store_sales s
            GROUP BY ss_customer_sk
        ) orders
        LEFT OUTER JOIN
        (
            SELECT
                sr_customer_sk,
                -- return order ratio
                CAST(count(distinct(sr_ticket_number)) AS DOUBLE)
                    AS returns_count,
                -- return ss_item_sk ratio
                CAST (COUNT(sr_item_sk) AS DOUBLE) AS returns_items,
                -- return monetary amount ratio
                CAST( SUM( sr_return_amt ) AS DOUBLE) AS returns_money
            FROM store_returns
            GROUP BY sr_customer_sk
        ) returned ON ss_customer_sk=sr_customer_sk
    """
    final_df = bc.sql(query)
    final_df = final_df.fillna(0)

    # Collapse to one partition and sort so clustering input is deterministic.
    final_df = final_df.repartition(npartitions=1).persist()
    wait(final_df)
    final_df = final_df.sort_values(["user_sk"]).reset_index(drop=True)
    final_df = final_df.persist()
    wait(final_df)

    feature_cols = ["orderRatio", "itemsRatio", "monetaryRatio", "frequency"]
    results_dict = get_clusters(
        client=client, ml_input_df=final_df, feature_cols=feature_cols
    )
    return results_dict
if __name__ == "__main__":
    # Attach to the cluster with a BlazingSQL context and run via the harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q21/gpu_bdb_query_21_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q21_utils import read_tables
def main(data_dir, client, c, config):
    """q21 (dask-sql variant): items sold in Jan 2003, returned within the
    following six months, and re-bought by the same customer via the web
    channel in 2003-2005; top 100 (item, store) groups by quantity.
    """
    benchmark(read_tables, config, c)

    query = """
        SELECT
            part_i.i_item_id AS i_item_id,
            part_i.i_item_desc AS i_item_desc,
            part_s.s_store_id AS s_store_id,
            part_s.s_store_name AS s_store_name,
            CAST(SUM(part_ss.ss_quantity) AS BIGINT) AS store_sales_quantity,
            CAST(SUM(part_sr.sr_return_quantity) AS BIGINT) AS store_returns_quantity,
            CAST(SUM(part_ws.ws_quantity) AS BIGINT) AS web_sales_quantity
        FROM
        (
            SELECT
                sr_item_sk,
                sr_customer_sk,
                sr_ticket_number,
                sr_return_quantity
            FROM
                store_returns sr,
                date_dim d2
            WHERE d2.d_year = 2003
            AND d2.d_moy BETWEEN 1 AND 7 --which were returned in the next six months
            AND sr.sr_returned_date_sk = d2.d_date_sk
        ) part_sr
        INNER JOIN
        (
            SELECT
                ws_item_sk,
                ws_bill_customer_sk,
                ws_quantity
            FROM
                web_sales ws,
                date_dim d3
            -- in the following three years (re-purchased by the returning customer afterwards through the web sales channel)
            WHERE d3.d_year BETWEEN 2003 AND 2005
            AND ws.ws_sold_date_sk = d3.d_date_sk
        ) part_ws ON
        (
            part_sr.sr_item_sk = part_ws.ws_item_sk
            AND part_sr.sr_customer_sk = part_ws.ws_bill_customer_sk
        ) INNER JOIN
        (
            SELECT
                ss_item_sk,
                ss_store_sk,
                ss_customer_sk,
                ss_ticket_number,
                ss_quantity
            FROM
                store_sales ss,
                date_dim d1
            WHERE d1.d_year = 2003
            AND d1.d_moy = 1
            AND ss.ss_sold_date_sk = d1.d_date_sk
        ) part_ss ON
        (
            part_ss.ss_ticket_number = part_sr.sr_ticket_number
            AND part_ss.ss_item_sk = part_sr.sr_item_sk
            AND part_ss.ss_customer_sk = part_sr.sr_customer_sk
        )
        INNER JOIN store part_s ON
        (
            part_s.s_store_sk = part_ss.ss_store_sk
        )
        INNER JOIN item part_i ON
        (
            part_i.i_item_sk = part_ss.ss_item_sk
        )
        GROUP BY
            part_i.i_item_id,
            part_i.i_item_desc,
            part_s.s_store_id,
            part_s.s_store_name
        ORDER BY
            part_i.i_item_id,
            part_i.i_item_desc,
            part_s.s_store_id,
            part_s.s_store_name
        LIMIT 100
    """
    result = c.sql(query)
    # Trim padded whitespace so descriptions compare cleanly in validation.
    result['i_item_desc'] = result['i_item_desc'].str.strip()
    return result
@annotate("QUERY21", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q21/gpu_bdb_query_21.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.merge_util import hash_merge
from bdb_tools.q21_utils import read_tables
from dask.distributed import wait
# Q21 query parameters (TPCx-BB defaults): store purchases in January 2003,
# returns within six months, web re-purchases within the following three years.
q21_year = 2003
q21_month = 1
q21_limit = 100  # number of result rows kept after sorting
def main(client, config):
    """Pure-Dask implementation of gpu-bdb Q21 (no SQL context).

    Mirrors the SQL formulation: store purchases in month ``q21_month`` of
    ``q21_year`` that were returned within six months and re-purchased on
    the web by the same customer within three years, aggregated per
    (item, store). Returns the first ``q21_limit`` rows sorted by the
    group-by keys, with columns renamed to match the reference output.
    """
    (
        store_sales_df,
        date_dim_df,
        web_sales_df,
        store_returns_df,
        store_table_df,
        item_table_df,
    ) = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # SELECT sr_item_sk, sr_customer_sk, sr_ticket_number, sr_return_quantity
    # FROM
    #     store_returns sr,
    #     date_dim d2
    # WHERE d2.d_year = ${hiveconf:q21_year}
    # AND d2.d_moy BETWEEN ${hiveconf:q21_month} AND ${hiveconf:q21_month} + 6 --which were returned in the next six months
    # AND sr.sr_returned_date_sk = d2.d_date_sk
    d2 = date_dim_df.query(
        f"d_year == {q21_year} and d_moy >= {q21_month} and d_moy <= {q21_month+6}",
        meta=date_dim_df._meta,
    ).reset_index(drop=True)

    part_sr = store_returns_df.merge(
        d2, left_on="sr_returned_date_sk", right_on="d_date_sk", how="inner"
    )

    cols_2_keep = [
        "sr_item_sk",
        "sr_customer_sk",
        "sr_ticket_number",
        "sr_return_quantity",
    ]
    part_sr = part_sr[cols_2_keep]
    # Persist and wait: part_sr feeds the hash merge below, so materialize
    # it once instead of re-reading/re-filtering per downstream task.
    part_sr = part_sr.persist()
    wait(part_sr)

    # SELECT
    #     ws_item_sk, ws_bill_customer_sk, ws_quantity
    # FROM
    #     web_sales ws,
    #     date_dim d3
    # WHERE d3.d_year BETWEEN ${hiveconf:q21_year} AND ${hiveconf:q21_year} + 2 -- in the following three years (re-purchased by the returning customer afterwards through
    # the web sales channel)
    # AND ws.ws_sold_date_sk = d3.d_date_sk
    # ) part_ws
    d3 = date_dim_df.query(
        f"d_year >= {q21_year} and d_year <= {q21_year + 2}", meta=date_dim_df._meta
    )

    part_ws = web_sales_df.merge(
        d3, left_on="ws_sold_date_sk", right_on="d_date_sk", how="inner"
    )

    cols_2_keep = ["ws_item_sk", "ws_bill_customer_sk", "ws_quantity"]
    part_ws = part_ws[cols_2_keep]
    part_ws = part_ws.persist()
    wait(part_ws)

    # part_ws ON (
    #     part_sr.sr_item_sk = part_ws.ws_item_sk
    #     AND part_sr.sr_customer_sk = part_ws.ws_bill_customer_sk
    part_ws_part_sr_m = hash_merge(
        lhs=part_sr,
        rhs=part_ws,
        left_on=["sr_item_sk", "sr_customer_sk"],
        right_on=["ws_item_sk", "ws_bill_customer_sk"],
        how="inner",
    )

    cols_2_keep = [
        "sr_item_sk",
        "sr_customer_sk",
        "sr_ticket_number",
        "sr_return_quantity",
        "ws_quantity",
    ]
    part_ws_part_sr_m = part_ws_part_sr_m[cols_2_keep]
    part_ws_part_sr_m = part_ws_part_sr_m.persist()
    wait(part_ws_part_sr_m)
    # Inputs are materialized in the merge result; free them eagerly.
    del part_sr, part_ws

    # SELECT ss_item_sk, ss_store_sk, ss_customer_sk, ss_ticket_number, ss_quantity
    # FROM
    #     store_sales ss,
    #     date_dim d1
    # WHERE d1.d_year = ${hiveconf:q21_year}
    # AND d1.d_moy = ${hiveconf:q21_month}
    # AND ss.ss_sold_date_sk = d1.d_date_sk
    # ) part_ss
    d1 = date_dim_df.query(
        f"d_year == {q21_year} and d_moy == {q21_month} ", meta=date_dim_df._meta
    )
    part_ss = store_sales_df.merge(
        d1, left_on="ss_sold_date_sk", right_on="d_date_sk", how="inner"
    )

    cols_2_keep = [
        "ss_item_sk",
        "ss_store_sk",
        "ss_customer_sk",
        "ss_ticket_number",
        "ss_quantity",
    ]

    part_ss = part_ss[cols_2_keep]

    # part_ss ON (
    #     part_ss.ss_ticket_number = part_sr.sr_ticket_number
    #     AND part_ss.ss_item_sk = part_sr.sr_item_sk
    #     AND part_ss.ss_customer_sk = part_sr.sr_customer_sk
    part_ws_part_sr_m_part_ss_join_df = hash_merge(
        lhs=part_ss,
        rhs=part_ws_part_sr_m,
        left_on=["ss_ticket_number", "ss_item_sk", "ss_customer_sk"],
        right_on=["sr_ticket_number", "sr_item_sk", "sr_customer_sk"],
        how="inner",
    )

    cols_2_keep = [
        "ss_store_sk",
        "ss_quantity",
        "sr_return_quantity",
        "ws_quantity",
        "ss_item_sk",
    ]
    part_ws_part_sr_m_part_ss_join_df = part_ws_part_sr_m_part_ss_join_df[cols_2_keep]

    # INNER JOIN store part_s ON (
    #     part_s.s_store_sk = part_ss.ss_store_sk
    # )
    part_ws_part_sr_m_part_ss_part_s_join_df = store_table_df.merge(
        part_ws_part_sr_m_part_ss_join_df,
        left_on="s_store_sk",
        right_on="ss_store_sk",
        how="inner",
    )

    cols_2_keep = [
        "s_store_name",
        "sr_return_quantity",
        "ss_quantity",
        "ws_quantity",
        "s_store_id",
        "ss_item_sk",
    ]

    part_ws_part_sr_m_part_ss_part_s_join_df = part_ws_part_sr_m_part_ss_part_s_join_df[
        cols_2_keep
    ]

    # INNER JOIN item part_i ON (
    #     part_i.i_item_sk = part_ss.ss_item_sk
    # )
    final_df = item_table_df.merge(
        part_ws_part_sr_m_part_ss_part_s_join_df,
        left_on="i_item_sk",
        right_on="ss_item_sk",
        how="inner",
    )

    # GROUP BY
    #     part_i.i_item_id,
    #     part_i.i_item_desc,
    #     part_s.s_store_id,
    #     part_s.s_store_name
    # ORDER BY
    #     part_i.i_item_id,
    #     part_i.i_item_desc,
    #     part_s.s_store_id,
    #     part_s.s_store_name
    cols_2_keep = [
        "i_item_id",
        "i_item_desc",
        "s_store_name",
        "ss_quantity",
        "sr_return_quantity",
        "ws_quantity",
        "s_store_id",
    ]

    grouped_df = final_df[cols_2_keep]
    agg_df = grouped_df.groupby(
        by=["i_item_id", "i_item_desc", "s_store_id", "s_store_name"]
    ).agg({"ss_quantity": "sum", "sr_return_quantity": "sum", "ws_quantity": "sum"})

    # Collapse to one partition so the global sort below is a single
    # in-partition sort (the aggregate is small at this point).
    agg_df = agg_df.repartition(npartitions=1).persist()
    sorted_agg_df = agg_df.reset_index().map_partitions(
        lambda df: df.sort_values(
            by=["i_item_id", "i_item_desc", "s_store_id", "s_store_name"]
        )
    )
    sorted_agg_df = sorted_agg_df.head(q21_limit)
    sorted_agg_df = sorted_agg_df.rename(
        columns={
            "ss_quantity": "store_sales_quantity",
            "sr_return_quantity": "store_returns_quantity",
            "ws_quantity": "web_sales_quantity",
        }
    )
    # Strip CHAR-column padding to match the reference result formatting.
    sorted_agg_df["i_item_desc"] = sorted_agg_df["i_item_desc"].str.strip()
    return sorted_agg_df
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q21/gpu_bdb_query_21_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register all parquet tables required by Q21 on the BlazingContext.

    Each table is exposed under its TPCx-BB name, reading every parquet
    part file beneath ``<data_dir>/<table>/``.
    """
    q21_tables = (
        "store_sales",
        "date_dim",
        "item",
        "web_sales",
        "store_returns",
        "store",
    )
    for table_name in q21_tables:
        bc.create_table(table_name, os.path.join(data_dir, table_name, "*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb Q21 through BlazingSQL.

    Same query as the dask-sql variant: January-2003 store purchases that
    were returned within six months and re-bought on the web by the same
    customer within three years, aggregated per (item, store), top 100.
    ``bc`` is the BlazingContext; returns the (lazy) result dataframe.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    query = """
        SELECT
            part_i.i_item_id AS i_item_id,
            part_i.i_item_desc AS i_item_desc,
            part_s.s_store_id AS s_store_id,
            part_s.s_store_name AS s_store_name,
            CAST(SUM(part_ss.ss_quantity) AS BIGINT) AS store_sales_quantity,
            CAST(SUM(part_sr.sr_return_quantity) AS BIGINT) AS store_returns_quantity,
            CAST(SUM(part_ws.ws_quantity) AS BIGINT) AS web_sales_quantity
        FROM
        (
            SELECT
                sr_item_sk,
                sr_customer_sk,
                sr_ticket_number,
                sr_return_quantity
            FROM
                store_returns sr,
                date_dim d2
            WHERE d2.d_year = 2003
            AND d2.d_moy BETWEEN 1 AND 7 --which were returned in the next six months
            AND sr.sr_returned_date_sk = d2.d_date_sk
        ) part_sr
        INNER JOIN
        (
            SELECT
                ws_item_sk,
                ws_bill_customer_sk,
                ws_quantity
            FROM
                web_sales ws,
                date_dim d3
            -- in the following three years (re-purchased by the returning customer afterwards through the web sales channel)
            WHERE d3.d_year BETWEEN 2003 AND 2005
            AND ws.ws_sold_date_sk = d3.d_date_sk
        ) part_ws ON
        (
            part_sr.sr_item_sk = part_ws.ws_item_sk
            AND part_sr.sr_customer_sk = part_ws.ws_bill_customer_sk
        ) INNER JOIN
        (
            SELECT
                ss_item_sk,
                ss_store_sk,
                ss_customer_sk,
                ss_ticket_number,
                ss_quantity
            FROM
                store_sales ss,
                date_dim d1
            WHERE d1.d_year = 2003
            AND d1.d_moy = 1
            AND ss.ss_sold_date_sk = d1.d_date_sk
        ) part_ss ON
        (
            part_ss.ss_ticket_number = part_sr.sr_ticket_number
            AND part_ss.ss_item_sk = part_sr.sr_item_sk
            AND part_ss.ss_customer_sk = part_sr.sr_customer_sk
        )
        INNER JOIN store part_s ON
        (
            part_s.s_store_sk = part_ss.ss_store_sk
        )
        INNER JOIN item part_i ON
        (
            part_i.i_item_sk = part_ss.ss_item_sk
        )
        GROUP BY
            part_i.i_item_id,
            part_i.i_item_desc,
            part_s.s_store_id,
            part_s.s_store_name
        ORDER BY
            part_i.i_item_id,
            part_i.i_item_desc,
            part_s.s_store_id,
            part_s.s_store_name
        LIMIT 100
    """
    result = bc.sql(query)
    # Strip CHAR-column padding so output matches the reference formatting.
    result['i_item_desc'] = result['i_item_desc'].str.strip()
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q16/gpu_bdb_query_16_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
import cudf
import datetime
from datetime import timedelta
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q16_utils import read_tables
def main(data_dir, client, c, config):
    """Run gpu-bdb Q16 through dask-sql.

    Computes, per warehouse state and item, web sales net of refunds in the
    30 days before versus after the 2001-03-16 cut-off date; first 100 rows
    ordered by (w_state, i_item_id). ``c`` is the dask_sql context.
    """
    benchmark(read_tables, config, c)

    date = datetime.datetime(2001, 3, 16)
    start = (date + timedelta(days=-30)).strftime("%Y-%m-%d")
    end = (date + timedelta(days=30)).strftime("%Y-%m-%d")
    mid = date.strftime("%Y-%m-%d")

    # Resolve the three calendar dates to their surrogate keys; the
    # ascending sort guarantees cpu_dates[0] < cpu_dates[1] < cpu_dates[2]
    # (window start, cut-off, window end).
    date_query = f"""
        SELECT d_date_sk
        FROM date_dim
        WHERE CAST(d_date as DATE) IN (DATE '{start}', DATE '{mid}', DATE '{end}')
        ORDER BY CAST(d_date as date) ASC
    """
    dates = c.sql(date_query)

    cpu_dates = dates["d_date_sk"].compute()
    # dask-sql may return a cudf-backed series; move it to host so the
    # positional indexing below works uniformly.
    if isinstance(cpu_dates, cudf.Series):
        cpu_dates = cpu_dates.to_pandas()
    cpu_dates.index = list(range(0, cpu_dates.shape[0]))

    last_query = f"""
        SELECT w_state, i_item_id,
            SUM
            (
                CASE WHEN ws_sold_date_sk < {str(cpu_dates[1])}
                THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
                ELSE 0.0 END
            ) AS sales_before,
            SUM
            (
                CASE WHEN ws_sold_date_sk >= {str(cpu_dates[1])}
                THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
                ELSE 0.0 END
            ) AS sales_after
        FROM
        (
            SELECT ws_item_sk,
                ws_warehouse_sk,
                ws_sold_date_sk,
                ws_sales_price,
                wr_refunded_cash
            FROM web_sales ws
            LEFT OUTER JOIN web_returns wr ON
            (
                ws.ws_order_number = wr.wr_order_number
                AND ws.ws_item_sk = wr.wr_item_sk
            )
            WHERE ws_sold_date_sk BETWEEN {str(cpu_dates[0])}
            AND {str(cpu_dates[2])}
        ) a1
        JOIN item i ON a1.ws_item_sk = i.i_item_sk
        JOIN warehouse w ON a1.ws_warehouse_sk = w.w_warehouse_sk
        GROUP BY w_state,i_item_id
        ORDER BY w_state,i_item_id
        LIMIT 100
    """
    result = c.sql(last_query)
    return result
@annotate("QUERY16", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q16/gpu_bdb_query_16_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
import datetime
from datetime import timedelta
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register all parquet tables required by Q16 on the BlazingContext.

    Each table is exposed under its TPCx-BB name, reading every parquet
    part file beneath ``<data_dir>/<table>/``.
    """
    q16_tables = ("web_sales", "web_returns", "date_dim", "item", "warehouse")
    for table_name in q16_tables:
        bc.create_table(table_name, os.path.join(data_dir, table_name, "*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb Q16 through BlazingSQL.

    Per warehouse state and item: web sales net of refunds in the 30 days
    before versus after 2001-03-16, first 100 rows ordered by
    (w_state, i_item_id). ``bc`` is the BlazingContext.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    date = datetime.datetime(2001, 3, 16)
    start = (date + timedelta(days=-30)).strftime("%Y-%m-%d")
    end = (date + timedelta(days=30)).strftime("%Y-%m-%d")
    mid = date.strftime("%Y-%m-%d")

    # Resolve the three calendar dates to surrogate keys; ascending sort
    # makes cpu_dates[0]/[1]/[2] the window start, cut-off, and window end.
    date_query = f"""
        SELECT d_date_sk
        FROM date_dim
        WHERE CAST(d_date as DATE) IN (DATE '{start}', DATE '{mid}', DATE '{end}')
        ORDER BY CAST(d_date as date) ASC
    """
    dates = bc.sql(date_query)
    cpu_dates = dates["d_date_sk"].compute().to_pandas()
    cpu_dates.index = list(range(0, cpu_dates.shape[0]))

    last_query = f"""
        SELECT w_state, i_item_id,
            SUM
            (
                CASE WHEN ws_sold_date_sk < {str(cpu_dates[1])}
                THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
                ELSE 0.0 END
            ) AS sales_before,
            SUM
            (
                CASE WHEN ws_sold_date_sk >= {str(cpu_dates[1])}
                THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
                ELSE 0.0 END
            ) AS sales_after
        FROM
        (
            SELECT ws_item_sk,
                ws_warehouse_sk,
                ws_sold_date_sk,
                ws_sales_price,
                wr_refunded_cash
            FROM web_sales ws
            LEFT OUTER JOIN web_returns wr ON
            (
                ws.ws_order_number = wr.wr_order_number
                AND ws.ws_item_sk = wr.wr_item_sk
            )
            WHERE ws_sold_date_sk BETWEEN {str(cpu_dates[0])}
            AND {str(cpu_dates[2])}
        ) a1
        JOIN item i ON a1.ws_item_sk = i.i_item_sk
        JOIN warehouse w ON a1.ws_warehouse_sk = w.w_warehouse_sk
        GROUP BY w_state,i_item_id
        ORDER BY w_state,i_item_id
        LIMIT 100
    """
    result = bc.sql(last_query)
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q16/gpu_bdb_query_16.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
convert_datestring_to_days,
)
from bdb_tools.merge_util import hash_merge
from bdb_tools.q16_utils import read_tables
from dask.distributed import wait
import numpy as np
### conf
# Q16 cut-off date: sales are split into "before" / "on-or-after" this day,
# within a +/- 30-day window around it.
q16_date = "2001-03-16"
# INSERT INTO TABLE ${hiveconf:RESULT_TABLE}
# SELECT w_state, i_item_id,
# SUM(
# CASE WHEN (unix_timestamp(d_date,'yyyy-MM-dd') < unix_timestamp('${hiveconf:q16_date}','yyyy-MM-dd'))
# THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
# ELSE 0.0 END
# ) AS sales_before,
# SUM(
# CASE WHEN (unix_timestamp(d_date,'yyyy-MM-dd') >= unix_timestamp('${hiveconf:q16_date}','yyyy-MM-dd'))
# THEN ws_sales_price - COALESCE(wr_refunded_cash,0)
# ELSE 0.0 END
# ) AS sales_after
def get_before_after_sales(df, q16_timestamp):
    """Split the blended ``sales`` column around the cut-off timestamp.

    Adds two columns in place: ``sales_before`` keeps the sale amount only
    for rows sold strictly before ``q16_timestamp`` (0.0 otherwise), and
    ``sales_after`` keeps it only for rows sold on or after it. ``d_date``
    is compared as integer days. Returns the mutated dataframe.
    """
    pre_cutoff = df["d_date"] < q16_timestamp
    post_cutoff = df["d_date"] >= q16_timestamp
    df["sales_before"] = df["sales"].copy()
    df["sales_after"] = df["sales"].copy()
    df.loc[~pre_cutoff, "sales_before"] = 0.00
    df.loc[~post_cutoff, "sales_after"] = 0.00
    return df
def main(client, config):
    """Pure-Dask implementation of gpu-bdb Q16 (no SQL context).

    Per warehouse state and item: web sales net of refunds in the 30 days
    before versus on-or-after ``q16_date``. State and item-id strings are
    dictionary-encoded to integer codes for the joins/groupby and decoded
    back at the end. Returns the top-100 groups as a computed dataframe.
    """
    web_sales_df, web_returns_df, date_dim_df, item_df, warehouse_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # Encode the string join/group keys as categorical codes.
    warehouse_df["w_state_code"] = warehouse_df[["w_state"]].categorize()["w_state"]
    item_df["i_item_id_code"] = item_df[["i_item_id"]].categorize()["i_item_id"]

    ## persisting as you need it for length calculation and to prevent duplicate reading
    ## downstream
    warehouse_df = warehouse_df.persist()
    item_df = item_df.persist()

    ## casting down because of dtype incosistieny in cudf/dask due to cat columns
    ### https://github.com/rapidsai/cudf/issues/4093
    wh_df_codes_min_signed_type = cudf.utils.dtypes.min_signed_type(
        len(warehouse_df["w_state_code"].compute().cat.categories)
    )
    warehouse_df["w_state_code"] = warehouse_df["w_state_code"].cat.codes.astype(
        wh_df_codes_min_signed_type
    )
    # code -> string lookup used to decode the final result.
    unique_states = warehouse_df[["w_state_code", "w_state"]].drop_duplicates()

    warehouse_df = warehouse_df[["w_state_code", "w_warehouse_sk"]]

    ## casting down because of dtype incosistieny in cudf/dask due to cat columns
    ### https://github.com/rapidsai/cudf/issues/4093
    item_df_codes_min_signed_type = cudf.utils.dtypes.min_signed_type(
        len(item_df["i_item_id_code"].compute().cat.categories)
    )
    item_df["i_item_id_code"] = item_df["i_item_id_code"].cat.codes.astype(
        item_df_codes_min_signed_type
    )
    # code -> string lookup used to decode the final result.
    unique_items = item_df[["i_item_id_code", "i_item_id"]].drop_duplicates()

    item_df = item_df[["i_item_id_code", "i_item_sk"]]

    # JOIN date_dim d ON a1.ws_sold_date_sk = d.d_date_sk
    # AND unix_timestamp(d.d_date, 'yyyy-MM-dd') >= unix_timestamp('${hiveconf:q16_date}', 'yyyy-MM-dd') - 30*24*60*60 --subtract 30 days in seconds
    # AND unix_timestamp(d.d_date, 'yyyy-MM-dd') <= unix_timestamp('${hiveconf:q16_date}', 'yyyy-MM-dd') + 30*24*60*60 --add 30 days in seconds

    ##todo: remove below
    date_dim_cov_df = date_dim_df.map_partitions(convert_datestring_to_days)
    # Cut-off expressed as integer days since epoch, matching d_date above.
    q16_timestamp = np.datetime64(q16_date, "D").astype(int)
    filtered_date_df = date_dim_cov_df.query(
        f"d_date >={q16_timestamp- 30} and d_date <= {q16_timestamp+30}",
        meta=date_dim_cov_df._meta,
    ).reset_index(drop=True)

    web_sales_df = web_sales_df.merge(
        filtered_date_df,
        left_on=["ws_sold_date_sk"],
        right_on=["d_date_sk"],
        how="inner",
    )

    cols_2_keep = [
        "ws_order_number",
        "ws_item_sk",
        "ws_warehouse_sk",
        "ws_sales_price",
        "d_date",
    ]
    web_sales_df = web_sales_df[cols_2_keep]
    web_sales_df = web_sales_df.persist()
    wait(web_sales_df)

    # SELECT *
    # FROM web_sales ws
    # LEFT OUTER JOIN web_returns wr ON (ws.ws_order_number = wr.wr_order_number
    # AND ws.ws_item_sk = wr.wr_item_sk)
    # ) a1
    web_sales_web_returns_join = hash_merge(
        lhs=web_sales_df,
        rhs=web_returns_df,
        left_on=["ws_order_number", "ws_item_sk"],
        right_on=["wr_order_number", "wr_item_sk"],
        how="left",
    )
    cols_2_keep = [
        "ws_item_sk",
        "ws_warehouse_sk",
        "ws_sales_price",
        "wr_refunded_cash",
        "d_date",
    ]
    web_sales_web_returns_join = web_sales_web_returns_join[cols_2_keep]
    web_sales_web_returns_join = web_sales_web_returns_join.persist()
    wait(web_sales_web_returns_join)
    del web_sales_df

    # JOIN item i ON a1.ws_item_sk = i.i_item_sk
    web_sales_web_returns_item_join = web_sales_web_returns_join.merge(
        item_df, left_on=["ws_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    cols_2_keep = [
        "ws_warehouse_sk",
        "ws_sales_price",
        "wr_refunded_cash",
        "i_item_id_code",
        "d_date",
    ]
    web_sales_web_returns_item_join = web_sales_web_returns_item_join[cols_2_keep]

    # JOIN warehouse w ON a1.ws_warehouse_sk = w.w_warehouse_sk
    web_sales_web_returns_item_warehouse_join = web_sales_web_returns_item_join.merge(
        warehouse_df,
        left_on=["ws_warehouse_sk"],
        right_on=["w_warehouse_sk"],
        how="inner",
    )

    merged_df = web_sales_web_returns_item_warehouse_join[
        [
            "ws_sales_price",
            "wr_refunded_cash",
            "i_item_id_code",
            "w_state_code",
            "d_date",
        ]
    ]

    # Net sales = price minus refund; missing values count as zero.
    merged_df["sales"] = web_sales_web_returns_item_warehouse_join[
        "ws_sales_price"
    ].fillna(0) - web_sales_web_returns_item_warehouse_join["wr_refunded_cash"].fillna(
        0
    )

    sales_df = merged_df[["i_item_id_code", "w_state_code", "d_date", "sales"]]

    sales_before_after_df = sales_df.map_partitions(
        get_before_after_sales, q16_timestamp
    )

    cols_2_keep = ["i_item_id_code", "w_state_code", "sales_before", "sales_after"]
    sales_before_after_df = sales_before_after_df[cols_2_keep]

    ## group by logic
    group_cols = ["w_state_code", "i_item_id_code"]
    agg_df = sales_before_after_df.groupby(group_cols, sort=True).agg(
        {"sales_before": "sum", "sales_after": "sum"}
    )
    agg_df = agg_df.reset_index(drop=False)
    # Groups are sorted by the group keys, so the first 100 rows are the
    # ORDER BY ... LIMIT 100 result.
    agg_df = agg_df.loc[:99].persist()

    agg_df = agg_df.reset_index(drop=False)
    agg_df.columns = [
        "sorted_grp_index",
        "w_state_code",
        "i_item_id_code",
        "sales_before",
        "sales_after",
    ]
    # Decode the integer codes back to the original strings; the joins can
    # shuffle rows, so sorted_grp_index restores the sort order afterwards.
    agg_df = agg_df.merge(unique_states, how="left", on="w_state_code")[
        ["sorted_grp_index", "w_state", "i_item_id_code", "sales_before", "sales_after"]
    ]
    agg_df = agg_df.merge(unique_items, how="left", on="i_item_id_code")[
        ["sorted_grp_index", "w_state", "i_item_id", "sales_before", "sales_after"]
    ]
    agg_df = agg_df.sort_values(by=["sorted_grp_index"])
    ## only 100 rows so computing is fine
    return agg_df[["w_state", "i_item_id", "sales_before", "sales_after"]].compute()
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q26/gpu_bdb_query_26_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
get_clusters
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
read_tables
)
def main(data_dir, client, c, config):
    """Run gpu-bdb Q26 through dask-sql.

    Builds one row per qualifying customer (more than ``Q26_ITEM_COUNT``
    purchases in category ``Q26_CATEGORY``), with per-class purchase counts
    id1..id15, then clusters those feature vectors with KMeans via
    ``get_clusters``. Returns the clustering result dict.
    """
    benchmark(read_tables, config, c)

    query = f"""
        SELECT
            ss.ss_customer_sk AS cid,
            CAST( count(CASE WHEN i.i_class_id=1  THEN 1 ELSE NULL END) AS DOUBLE ) AS id1,
            CAST( count(CASE WHEN i.i_class_id=2  THEN 1 ELSE NULL END) AS DOUBLE ) AS id2,
            CAST( count(CASE WHEN i.i_class_id=3  THEN 1 ELSE NULL END) AS DOUBLE ) AS id3,
            CAST( count(CASE WHEN i.i_class_id=4  THEN 1 ELSE NULL END) AS DOUBLE ) AS id4,
            CAST( count(CASE WHEN i.i_class_id=5  THEN 1 ELSE NULL END) AS DOUBLE ) AS id5,
            CAST( count(CASE WHEN i.i_class_id=6  THEN 1 ELSE NULL END) AS DOUBLE ) AS id6,
            CAST( count(CASE WHEN i.i_class_id=7  THEN 1 ELSE NULL END) AS DOUBLE ) AS id7,
            CAST( count(CASE WHEN i.i_class_id=8  THEN 1 ELSE NULL END) AS DOUBLE ) AS id8,
            CAST( count(CASE WHEN i.i_class_id=9  THEN 1 ELSE NULL END) AS DOUBLE ) AS id9,
            CAST( count(CASE WHEN i.i_class_id=10 THEN 1 ELSE NULL END) AS DOUBLE ) AS id10,
            CAST( count(CASE WHEN i.i_class_id=11 THEN 1 ELSE NULL END) AS DOUBLE ) AS id11,
            CAST( count(CASE WHEN i.i_class_id=12 THEN 1 ELSE NULL END) AS DOUBLE ) AS id12,
            CAST( count(CASE WHEN i.i_class_id=13 THEN 1 ELSE NULL END) AS DOUBLE ) AS id13,
            CAST( count(CASE WHEN i.i_class_id=14 THEN 1 ELSE NULL END) AS DOUBLE ) AS id14,
            CAST( count(CASE WHEN i.i_class_id=15 THEN 1 ELSE NULL END) AS DOUBLE ) AS id15
        FROM store_sales ss
        INNER JOIN item i
        ON
        (
            ss.ss_item_sk = i.i_item_sk
            AND i.i_category IN ('{Q26_CATEGORY}')
            AND ss.ss_customer_sk IS NOT NULL
        )
        GROUP BY ss.ss_customer_sk
        HAVING count(ss.ss_item_sk) > {Q26_ITEM_COUNT}
        ORDER BY cid
    """
    result = c.sql(query)
    # Single partition, indexed by customer id: the shape the KMeans
    # helper expects for its feature matrix.
    result = result.repartition(npartitions=1)
    result_ml = result.set_index('cid')

    ml_result_dict = get_clusters(client=client, kmeans_input_df=result_ml)
    return ml_result_dict
@annotate("QUERY26", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q26/gpu_bdb_query_26.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
get_clusters
)
from bdb_tools.q26_utils import (
Q26_CATEGORY,
Q26_ITEM_COUNT,
read_tables
)
import numpy as np
def agg_count_distinct(df, group_key, counted_key):
    """Count distinct ``counted_key`` values within each ``group_key``.

    Returns a Series indexed by the unique ``group_key`` values. Implemented
    via drop_duplicates + count as a workaround for the missing ``nunique``
    groupby aggregation on Dask dataframes.
    """
    unique_pairs = df.drop_duplicates([group_key, counted_key])
    per_group = unique_pairs.groupby(group_key)[counted_key]
    return per_group.count()
def main(client, config):
    """Pure-Dask implementation of gpu-bdb Q26 (no SQL context).

    Builds, per customer with more than ``Q26_ITEM_COUNT`` purchases in
    category ``Q26_CATEGORY``, a 15-dimensional vector of purchase counts
    per item class (one-hot encoded then summed), and clusters those
    vectors with KMeans via ``get_clusters``. Returns the result dict.
    """
    import cudf

    ss_ddf, items_ddf = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    items_filtered = items_ddf[items_ddf.i_category == Q26_CATEGORY].reset_index(
        drop=True
    )
    items_filtered = items_filtered[["i_item_sk", "i_class_id"]]

    # Drop sales with no customer (SQL: ss_customer_sk IS NOT NULL).
    f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
    merged_ddf = f_ss_ddf.merge(
        items_filtered, left_on="ss_item_sk", right_on="i_item_sk", how="inner"
    )
    keep_cols = ["ss_customer_sk", "i_class_id"]
    merged_ddf = merged_ddf[keep_cols]

    # One-Hot-Encode i_class_id: fixed category list 1..15 so every
    # partition produces the same id1..id15 columns.
    merged_ddf = merged_ddf.map_partitions(
        cudf.get_dummies,
        columns=["i_class_id"],
        prefix="id",
        cats={"i_class_id": np.arange(1, 16, dtype="int32")},
        prefix_sep="",
        dtype="float32",
    )
    merged_ddf["total"] = 1.0  # Will keep track of total count

    all_categories = ["total"] + ["id%d" % i for i in range(1, 16)]

    # Aggregate using agg to get sorted ss_customer_sk
    agg_dict = dict.fromkeys(all_categories, "sum")
    rollup_ddf = merged_ddf.groupby("ss_customer_sk", as_index=False).agg(agg_dict)
    rollup_ddf = rollup_ddf.sort_values(["ss_customer_sk"])
    rollup_ddf = rollup_ddf.set_index("ss_customer_sk")

    # HAVING count(ss_item_sk) > Q26_ITEM_COUNT, then keep only id1..id15.
    rollup_ddf = rollup_ddf[rollup_ddf.total > Q26_ITEM_COUNT][all_categories[1:]]

    # Prepare data for KMeans clustering
    rollup_ddf = rollup_ddf.astype("float64")

    kmeans_input_df = rollup_ddf.persist()

    results_dict = get_clusters(client=client, kmeans_input_df=kmeans_input_df)
    return results_dict
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q26/gpu_bdb_query_26_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
train_clustering_model
)
from dask import delayed
# -------- Q26 -----------
q26_i_category_IN = "Books"  # item category filter for the SQL query
q26_count_ss_item_sk = 5     # HAVING threshold on per-customer item count
N_CLUSTERS = 8               # number of KMeans clusters
CLUSTER_ITERATIONS = 20      # max iterations per KMeans fit
N_ITER = 5                   # KMeans restarts (n_init)
def get_clusters(client, kmeans_input_df):
    """Cluster customer feature vectors with KMeans and attach labels.

    Trains via ``train_clustering_model`` on the delayed partitions of
    ``kmeans_input_df`` (expected to be a single-partition dask dataframe
    indexed by customer id) and returns the model-result dict with
    ``cid_labels`` replaced by a (cid, label) dataframe.
    """
    import dask_cudf

    ml_tasks = [
        delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
        for df in kmeans_input_df.to_delayed()
    ]
    # sync=True blocks until training finishes and returns concrete results.
    results_dict = client.compute(*ml_tasks, sync=True)

    # Pair each input row's customer id with its predicted cluster label.
    output = kmeans_input_df.index.to_frame().reset_index(drop=True)

    labels_final = dask_cudf.from_cudf(
        results_dict["cid_labels"], npartitions=output.npartitions
    )
    output["label"] = labels_final.reset_index()[0]

    # Based on CDH6.1 q26-result formatting
    results_dict["cid_labels"] = output
    return results_dict
def read_tables(data_dir, bc):
    """Register the two parquet tables required by Q26 on the BlazingContext."""
    for table_name in ("store_sales", "item"):
        bc.create_table(table_name, os.path.join(data_dir, table_name, "*.parquet"))
def main(data_dir, client, bc, config):
    """Q26: cluster store customers by their purchase mix within one category.

    Builds a per-customer feature vector of purchase counts across item
    classes 1-15 (restricted to ``q26_i_category_IN``), then KMeans-clusters
    the customers via :func:`get_clusters`.

    Returns the results dict produced by ``get_clusters``.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # One DOUBLE column per i_class_id (1..15): count of this customer's
    # store sales in that class. Customers with <= q26_count_ss_item_sk
    # purchased items are dropped by the HAVING clause.
    query = f"""
        SELECT
            ss.ss_customer_sk AS cid,
            CAST( count(CASE WHEN i.i_class_id=1 THEN 1 ELSE NULL END) AS DOUBLE ) AS id1,
            CAST( count(CASE WHEN i.i_class_id=2 THEN 1 ELSE NULL END) AS DOUBLE ) AS id2,
            CAST( count(CASE WHEN i.i_class_id=3 THEN 1 ELSE NULL END) AS DOUBLE ) AS id3,
            CAST( count(CASE WHEN i.i_class_id=4 THEN 1 ELSE NULL END) AS DOUBLE ) AS id4,
            CAST( count(CASE WHEN i.i_class_id=5 THEN 1 ELSE NULL END) AS DOUBLE ) AS id5,
            CAST( count(CASE WHEN i.i_class_id=6 THEN 1 ELSE NULL END) AS DOUBLE ) AS id6,
            CAST( count(CASE WHEN i.i_class_id=7 THEN 1 ELSE NULL END) AS DOUBLE ) AS id7,
            CAST( count(CASE WHEN i.i_class_id=8 THEN 1 ELSE NULL END) AS DOUBLE ) AS id8,
            CAST( count(CASE WHEN i.i_class_id=9 THEN 1 ELSE NULL END) AS DOUBLE ) AS id9,
            CAST( count(CASE WHEN i.i_class_id=10 THEN 1 ELSE NULL END) AS DOUBLE ) AS id10,
            CAST( count(CASE WHEN i.i_class_id=11 THEN 1 ELSE NULL END) AS DOUBLE ) AS id11,
            CAST( count(CASE WHEN i.i_class_id=12 THEN 1 ELSE NULL END) AS DOUBLE ) AS id12,
            CAST( count(CASE WHEN i.i_class_id=13 THEN 1 ELSE NULL END) AS DOUBLE ) AS id13,
            CAST( count(CASE WHEN i.i_class_id=14 THEN 1 ELSE NULL END) AS DOUBLE ) AS id14,
            CAST( count(CASE WHEN i.i_class_id=15 THEN 1 ELSE NULL END) AS DOUBLE ) AS id15
        FROM store_sales ss
        INNER JOIN item i
        ON
        (
            ss.ss_item_sk = i.i_item_sk
            AND i.i_category IN ('{q26_i_category_IN}')
            AND ss.ss_customer_sk IS NOT NULL
        )
        GROUP BY ss.ss_customer_sk
        HAVING count(ss.ss_item_sk) > {q26_count_ss_item_sk}
        ORDER BY cid
    """
    result = bc.sql(query)

    # Collapse to a single partition so the labels computed in get_clusters
    # line up row-for-row with the customer ids.
    result = result.repartition(npartitions=1)
    result_ml = result.set_index('cid')
    ml_result_dict = get_clusters(client=client, kmeans_input_df=result_ml)
    return ml_result_dict
if __name__ == "__main__":
    # Entry point: attach to the cluster with a BlazingSQL context and run
    # the query through the shared benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q25/gpu_bdb_query_25_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
get_clusters
)
from bdb_tools.q25_utils import (
q25_date,
read_tables
)
def agg_count_distinct(df, group_key, counted_key):
    """Count distinct *counted_key* values per *group_key*.

    Workaround for the missing ``nunique`` aggregation on Dask dataframes:
    drop duplicates within each partition, shuffle so equal keys share a
    partition, drop duplicates again, then a plain per-group count equals
    the distinct count. Returns a frame with *group_key* and the count.
    """
    dedup = lambda part: part.drop_duplicates()

    # Local de-duplication first keeps the shuffle payload small.
    pairs = df[[group_key, counted_key]].map_partitions(dedup)
    pairs = pairs.shuffle(on=[group_key]).map_partitions(dedup)

    counts = pairs.groupby(group_key)[counted_key].count()
    return counts.reset_index(drop=False)
def main(data_dir, client, c, config):
    """Q25 (dask-sql): RFM-style customer segmentation over store + web sales.

    Computes recency / frequency / amount per customer from sales after
    ``q25_date``, unions the two channels, and KMeans-clusters the result.
    Distinct counts are computed in Dask (``agg_count_distinct``) because a
    ``count(distinct ...)`` group-by OOMs in dask-sql.
    """
    benchmark(read_tables, config, c)
    # Channel-specific date filters; customers must be non-null.
    ss_join_query= f"""
        SELECT
            ss_customer_sk,
            ss_sold_date_sk,
            ss_net_paid,
            ss_ticket_number
        FROM
            store_sales ss
        JOIN
            date_dim d ON ss.ss_sold_date_sk = d.d_date_sk
        WHERE
            CAST(d.d_date AS DATE) > DATE '{q25_date}'
        AND
            ss_customer_sk IS NOT NULL
    """
    ws_join_query = f"""
        SELECT
            ws_bill_customer_sk,
            ws_order_number,
            ws_sold_date_sk,
            ws_net_paid
        FROM
            web_sales ws
        JOIN
            date_dim d ON ws.ws_sold_date_sk = d.d_date_sk
        WHERE
            CAST(d.d_date AS DATE) > DATE '{q25_date}'
        AND
            ws_bill_customer_sk IS NOT NULL
    """
    ss_merged_df = c.sql(ss_join_query)
    ws_merged_df = c.sql(ws_join_query)
    c.create_table('ss_merged_table', ss_merged_df, persist=False)
    c.create_table('ws_merged_table', ws_merged_df, persist=False)
    # Per-customer most-recent sale date and total spend per channel.
    ss_agg_query = """
        SELECT
            ss_customer_sk AS cid,
            -- count(distinct ss_ticket_number) AS frequency, # distinct count groupby OOMS with dask-sql
            max(ss_sold_date_sk) AS most_recent_date,
            CAST( SUM(ss_net_paid) AS DOUBLE) AS amount
        FROM ss_merged_table
        GROUP BY ss_customer_sk
    """
    ws_agg_query= """
        SELECT
            ws_bill_customer_sk AS cid,
            -- count(distinct ws_order_number) AS frequency, # distinct count groupby OOMS with dask-sql
            max(ws_sold_date_sk) AS most_recent_date,
            CAST( SUM(ws_net_paid) AS DOUBLE) AS amount
        FROM ws_merged_table
        GROUP BY ws_bill_customer_sk
    """
    # Frequency = distinct tickets/orders, computed on the Dask side.
    ss_distinct_count_agg = agg_count_distinct(ss_merged_df,'ss_customer_sk','ss_ticket_number')
    ss_distinct_count_agg = ss_distinct_count_agg.rename(columns={'ss_customer_sk':'cid',
                                                                  'ss_ticket_number':'frequency'})
    ss_agg_df = c.sql(ss_agg_query)
    ### add distinct count
    ss_agg_df = ss_agg_df.merge(ss_distinct_count_agg)

    ws_distinct_count_agg = agg_count_distinct(ws_merged_df,'ws_bill_customer_sk','ws_order_number')
    ws_distinct_count_agg = ws_distinct_count_agg.rename(columns={'ws_bill_customer_sk':'cid',
                                                                  'ws_order_number':'frequency'})
    ws_agg_df = c.sql(ws_agg_query)
    ### add distinct count
    ws_agg_df = ws_agg_df.merge(ws_distinct_count_agg)

    c.create_table('ss_agg_df', ss_agg_df, persist=False)
    c.create_table('ws_agg_df', ws_agg_df, persist=False)
    # Union the two channels, then reduce to one row per customer.
    result_query = '''
        WITH concat_table AS
        (
            SELECT * FROM ss_agg_df
            UNION ALL
            SELECT * FROM ws_agg_df
        )
        SELECT
            cid AS cid,
            CASE WHEN 37621 - max(most_recent_date) < 60 THEN 1.0
                ELSE 0.0 END AS recency, -- 37621 == 2003-01-02
            CAST( SUM(frequency) AS BIGINT) AS frequency, --total frequency
            CAST( SUM(amount) AS DOUBLE) AS amount --total amount
        FROM concat_table
        GROUP BY cid
        ORDER BY cid
    '''
    cluster_input_ddf = c.sql(result_query)

    # Prepare df for KMeans clustering: integer recency flag and a single
    # partition so labels align with the cid index.
    cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
    cluster_input_ddf = cluster_input_ddf.repartition(npartitions=1)
    cluster_input_ddf = cluster_input_ddf.persist()
    cluster_input_ddf = cluster_input_ddf.set_index('cid')
    results_dict = get_clusters(client=client, kmeans_input_df=cluster_input_ddf)
    return results_dict
@annotate("QUERY25", color="green", domain="gpu-bdb")
def start_run():
    """Attach to the cluster with a dask-sql context and run Q25.

    The nvtx ``annotate`` decorator marks the whole run for GPU profilers.
    """
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)

if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q25/gpu_bdb_query_25.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
convert_datestring_to_days,
get_clusters
)
from bdb_tools.q25_utils import (
q25_date,
read_tables
)
def agg_count_distinct(df, group_key, counted_key, client):
    """Count distinct *counted_key* values per *group_key* (Dask workaround
    for the missing ``nunique`` aggregation).

    ``client`` is unused but kept for interface compatibility with callers.
    Returns a Series indexed by *group_key*.
    """
    dedup = lambda part: part.drop_duplicates()

    # Partition-local de-dup, shuffle equal keys together, de-dup again;
    # a plain count then equals the distinct count.
    pairs = df[[group_key, counted_key]].map_partitions(dedup)
    pairs = pairs.shuffle(on=[group_key]).map_partitions(dedup)
    return pairs.groupby(group_key)[counted_key].count(split_every=2)
def main(client, config):
    """Q25 (pure Dask): RFM-style customer segmentation over store + web sales.

    Filters both channels to sales after ``q25_date``, rolls up recency /
    frequency / amount per customer, concatenates the channels, and
    KMeans-clusters the combined table via ``get_clusters``.
    """
    ss_ddf, ws_ddf, datedim_ddf = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # Convert d_date strings to integer day counts so they compare against
    # the numpy day-resolution epoch below.
    datedim_ddf = datedim_ddf.map_partitions(convert_datestring_to_days)
    min_date = np.datetime64(q25_date, "D").astype(int)
    # Filter by date
    valid_dates_ddf = datedim_ddf[datedim_ddf["d_date"] > min_date].reset_index(
        drop=True
    )

    # Only rows with a known customer contribute to the segmentation.
    f_ss_ddf = ss_ddf[ss_ddf["ss_customer_sk"].notnull()].reset_index(drop=True)
    f_ws_ddf = ws_ddf[ws_ddf["ws_bill_customer_sk"].notnull()].reset_index(drop=True)

    # Merge
    ss_merged_df = f_ss_ddf.merge(
        valid_dates_ddf, left_on="ss_sold_date_sk", right_on="d_date_sk", how="inner"
    )
    ws_merged_df = f_ws_ddf.merge(
        valid_dates_ddf, left_on="ws_sold_date_sk", right_on="d_date_sk", how="inner"
    )

    # Roll up store sales: most recent sale date and total spend per customer.
    agg_store_sales_ddf = ss_merged_df.groupby("ss_customer_sk").agg(
        {"ss_sold_date_sk": "max", "ss_net_paid": "sum"}
    )
    agg_store_sales_ddf["frequency"] = agg_count_distinct(
        ss_merged_df, "ss_customer_sk", "ss_ticket_number", client=client
    )  # Simulate count distinct

    # Same rollup, just different columns for web sales
    agg_web_sales_ddf = ws_merged_df.groupby("ws_bill_customer_sk").agg(
        {"ws_sold_date_sk": "max", "ws_net_paid": "sum"}
    )
    agg_web_sales_ddf["frequency"] = agg_count_distinct(
        ws_merged_df, "ws_bill_customer_sk", "ws_order_number", client=client
    )  # Simulate count distinct

    agg_store_sales_ddf = agg_store_sales_ddf.reset_index()
    agg_web_sales_ddf = agg_web_sales_ddf.reset_index()

    # Align both channels on one schema so they can be concatenated.
    shared_columns = ["cid", "most_recent_date", "amount", "frequency"]
    agg_store_sales_ddf.columns = shared_columns
    agg_web_sales_ddf.columns = shared_columns
    agg_sales_ddf = dask_cudf.concat([agg_store_sales_ddf, agg_web_sales_ddf])

    cluster_input_ddf = agg_sales_ddf.groupby("cid", as_index=False).agg(
        {"most_recent_date": "max", "frequency": "sum", "amount": "sum"}
    )
    # Recency flag: bought within 60 days of day 37621 (== 2003-01-02).
    cluster_input_ddf["recency"] = (37621 - cluster_input_ddf["most_recent_date"]) < 60
    cluster_input_ddf = cluster_input_ddf.sort_values(["cid"])
    cluster_input_ddf = cluster_input_ddf.set_index("cid")
    # Reorder to match refererence examples
    cluster_input_ddf = cluster_input_ddf[["recency", "frequency", "amount"]]

    # Prepare df for KMeans clustering: numeric dtypes expected by cuML.
    cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
    cluster_input_ddf["amount"] = cluster_input_ddf["amount"].astype("float64")

    cluster_input_ddf = cluster_input_ddf.persist()
    results_dict = get_clusters(client=client, kmeans_input_df=cluster_input_ddf)
    return results_dict
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    # Parse the benchmark configuration, attach to the running Dask cluster,
    # and execute the query through the shared harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q25/gpu_bdb_query_25_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
train_clustering_model
)
from dask import delayed
# -------- Q25 -----------
# -- store_sales and web_sales date
q25_date = "2002-01-02"  # only store/web sales after this date are considered
# KMeans hyper-parameters forwarded to train_clustering_model
N_CLUSTERS = 8
CLUSTER_ITERATIONS = 20
N_ITER = 5  # presumably the number of KMeans restarts (n_init) — confirm in train_clustering_model
def get_clusters(client, ml_input_df):
    """Fit a KMeans model over *ml_input_df* and attach per-row labels.

    One training task is created per partition; the resulting label series
    is joined back onto the customer-id index. Returns the dict produced by
    ``train_clustering_model`` with ``cid_labels`` replaced by a
    (cid, label) frame.
    """
    import dask_cudf

    tasks = [
        delayed(train_clustering_model)(part, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
        for part in ml_input_df.to_delayed()
    ]
    results_dict = client.compute(*tasks, sync=True)

    # Re-materialize the index as a column so labels can be attached to it.
    output = ml_input_df.index.to_frame().reset_index(drop=True)
    labels = dask_cudf.from_cudf(
        results_dict["cid_labels"], npartitions=output.npartitions
    )
    output["label"] = labels.reset_index()[0]

    # Based on CDH6.1 q25-result formatting
    results_dict["cid_labels"] = output
    return results_dict
def read_tables(data_dir, bc):
    """Register the parquet datasets Q25 needs as SQL tables on *bc*."""
    for table in ("web_sales", "store_sales", "date_dim"):
        bc.create_table(table, os.path.join(data_dir, f"{table}/*.parquet"))
def main(data_dir, client, bc, config):
    """Q25 (BlazingSQL): RFM-style customer segmentation in a single query.

    Unions per-customer recency / frequency / amount rollups from store and
    web sales (filtered to sales after ``q25_date``), then KMeans-clusters
    the combined table via :func:`get_clusters`.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = f"""
        WITH concat_table AS
        (
            (
                SELECT
                    ss_customer_sk AS cid,
                    count(distinct ss_ticket_number) AS frequency,
                    max(ss_sold_date_sk) AS most_recent_date,
                    CAST( SUM(ss_net_paid) AS DOUBLE) AS amount
                FROM store_sales ss
                JOIN date_dim d ON ss.ss_sold_date_sk = d.d_date_sk
                WHERE CAST(d.d_date AS DATE) > DATE '{q25_date}'
                AND ss_customer_sk IS NOT NULL
                GROUP BY ss_customer_sk
            ) union all
            (
                SELECT
                    ws_bill_customer_sk AS cid,
                    count(distinct ws_order_number) AS frequency,
                    max(ws_sold_date_sk) AS most_recent_date,
                    CAST( SUM(ws_net_paid) AS DOUBLE) AS amount
                FROM web_sales ws
                JOIN date_dim d ON ws.ws_sold_date_sk = d.d_date_sk
                WHERE CAST(d.d_date AS DATE) > DATE '{q25_date}'
                AND ws_bill_customer_sk IS NOT NULL
                GROUP BY ws_bill_customer_sk
            )
        )
        SELECT
            cid AS cid,
            CASE WHEN 37621 - max(most_recent_date) < 60 THEN 1.0
                ELSE 0.0 END AS recency, -- 37621 == 2003-01-02
            CAST( SUM(frequency) AS BIGINT) AS frequency, --total frequency
            CAST( SUM(amount) AS DOUBLE) AS amount --total amount
        FROM concat_table
        GROUP BY cid
        ORDER BY cid
    """
    cluster_input_ddf = bc.sql(query)

    # Prepare df for KMeans clustering: integer recency flag and a single
    # partition so the labels align with the cid index.
    cluster_input_ddf["recency"] = cluster_input_ddf["recency"].astype("int64")
    cluster_input_ddf = cluster_input_ddf.repartition(npartitions=1)
    cluster_input_ddf = cluster_input_ddf.persist()
    cluster_input_ddf = cluster_input_ddf.set_index('cid')
    results_dict = get_clusters(client=client, ml_input_df=cluster_input_ddf)
    return results_dict
if __name__ == "__main__":
    # Entry point: attach to the cluster with a BlazingSQL context and run
    # the query through the shared benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q27/gpu_bdb_query_27_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from bdb_tools.cluster_startup import attach_to_cluster
from dask.distributed import wait
import spacy
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q27 -----------
q27_pr_item_sk = 10002  # the single product whose reviews are scanned for competitor mentions
EOL_CHAR = "."          # sentence delimiter used when splitting review text
def read_tables(data_dir, bc):
    """Register the product_reviews parquet dataset as a SQL table on *bc*."""
    reviews_glob = os.path.join(data_dir, "product_reviews/*.parquet")
    bc.create_table("product_reviews", reviews_glob)
def ner_parser(df, col_string, batch_size=256):
    """Run spaCy NER over *col_string* and collect ORG entities per row.

    Adds a ``company_name_list`` column holding a ", "-joined string of the
    ORG entities found in each row's text (empty string when none).
    Returns the mutated dataframe.
    """
    spacy.require_gpu()
    nlp = spacy.load("en_core_web_sm")
    # tagger/parser are not needed for entity recognition — skip them.
    docs = nlp.pipe(df[col_string], disable=["tagger", "parser"], batch_size=batch_size)
    df["company_name_list"] = [
        ", ".join(ent.text for ent in doc.ents if ent.label_ == "ORG")
        for doc in docs
    ]
    return df
def main(data_dir, client, bc, config):
    """Q27 (BlazingSQL): find competitor (ORG) mentions in one product's reviews.

    Splits the reviews into sentences, runs spaCy NER over them, explodes
    the found company names to one row each, and joins names back to their
    source sentences. Returns (review_sk, item_sk, company_name,
    review_sentence) ordered rows.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    import dask_cudf

    query = f"""
        SELECT pr_review_sk, pr_item_sk, pr_review_content
        FROM product_reviews
        WHERE pr_item_sk = {q27_pr_item_sk}
    """
    product_reviews_df = bc.sql(query)
    sentences = product_reviews_df.map_partitions(
        create_sentences_from_reviews,
        review_column="pr_review_content",
        end_of_line_char=EOL_CHAR,
    )
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    del product_reviews_df

    # Do the NER on CPU via a plain dask dataframe (spaCy pipeline), then
    # move the result back to GPU.
    sentences = sentences.to_dask_dataframe()
    ner_parsed = sentences.map_partitions(ner_parser, "sentence")
    ner_parsed = dask_cudf.from_dask_dataframe(ner_parsed)
    ner_parsed = ner_parsed.persist()
    wait(ner_parsed)

    # Keep only sentences where at least one company name was found.
    ner_parsed = ner_parsed[ner_parsed.company_name_list != ""]

    # separate NER results into one row per found company
    # NOTE(review): "é" is presumably chosen as a delimiter unlikely to
    # occur in company names — confirm against the data.
    repeated_names = ner_parsed.map_partitions(
        create_words_from_sentences,
        sentence_column="company_name_list",
        global_position_column="sentence_tokenized_global_pos",
        delimiter="é",
    )
    del sentences

    # recombine: join each exploded company name back to its sentence.
    repeated_names = repeated_names.persist()
    wait(repeated_names)
    bc.create_table('repeated_names', repeated_names)

    ner_parsed = ner_parsed.persist()
    wait(ner_parsed)
    bc.create_table('ner_parsed', ner_parsed)

    query = f"""
        SELECT review_idx_global_pos as review_sk,
            CAST({q27_pr_item_sk} AS BIGINT) as item_sk,
            word as company_name,
            sentence as review_sentence
        FROM repeated_names left join ner_parsed
        ON sentence_idx_global_pos = sentence_tokenized_global_pos
        ORDER BY review_idx_global_pos, item_sk, word, sentence
    """
    recombined = bc.sql(query)

    # Drop the temporary tables and intermediates before returning.
    bc.drop_table("repeated_names")
    bc.drop_table("ner_parsed")
    del ner_parsed
    del repeated_names
    return recombined
if __name__ == "__main__":
    # Entry point: attach to the cluster with a BlazingSQL context and run
    # the query through the shared benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q27/gpu_bdb_query_hf_27.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implementation Details
# In this query we do NER (Named Entity Recognition) to find competitor mentions in the reviews
### The NER model used is based on Hugging Face's AutoModelForTokenClassification transformer model
### The inference part of the workflow that produces the token labels is in q27_bert_utils.run_inference_on_df
### The sentences are gathered using EOL char as `.`
### The details for sentence gathering are in q27_get_review_sentence_utils.get_review_sentence
# Current limitations
### We don't do model-based sentence boundary disambiguation
### We get an empty sentence in 0.04% of the cases because of it
import rmm
import numpy as np
import os
import logging
import time
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.readers import build_reader
from dask.distributed import Client, wait, get_worker
### Query Specific Utils
from bdb_tools.q27_bert_utils import (
run_inference_on_df,
load_model,
create_vocab_table,
del_model_attribute,
)
from bdb_tools.q27_get_review_sentence_utils import get_review_sentence
# -------- Q27 -----------
q27_pr_item_sk = 10002
def read_tables(config):
    """Load the Q27-relevant columns of product_reviews.

    The reader splits by parquet row groups for better parallelism across
    workers. Returns a distributed dataframe.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
    )
    wanted_cols = ["pr_item_sk", "pr_review_content", "pr_review_sk"]
    return reader.read("product_reviews", relevant_cols=wanted_cols)
def run_single_part_workflow(df, model_path):
    """
    This function runs the entire ner workflow end2end on a single GPU.

    Runs BERT token-classification inference over the review text in *df*,
    reconstructs the sentences containing ORG predictions, and joins them
    back to the source review keys. Returns a de-duplicated frame with
    columns (review_sk, item_sk, company_name, review_sentence).
    """
    import cudf

    w_st = time.time()
    # Cache the loaded model on the Dask worker so repeated partitions on
    # the same worker skip the (expensive) model load.
    worker = get_worker()
    if hasattr(worker, "q27_model"):
        model = worker.q27_model
    else:
        model = load_model(model_path)
        worker.q27_model = model

    id2vocab, vocab2id = create_vocab_table(os.path.join(model_path, "vocab.txt"))
    vocab_hash_file = os.path.join(model_path, "vocab-hash.txt")
    token_d, prediction_d = run_inference_on_df(
        df, model, vocab_hash_file, batchsize=128
    )

    output_d = {}
    # For each inference sequence that produced predictions, rebuild the
    # review sentence text and attach the originating review/item keys.
    for seq, pred_label in prediction_d.items():
        if pred_label is not None:
            sen_df = get_review_sentence(
                token_d[seq], prediction_d[seq], vocab2id, id2vocab
            )
            review_df = token_d[seq]["df"][["pr_review_sk", "pr_item_sk"]]
            review_df = review_df.reset_index(drop=False)
            review_df.rename(columns={"index": "input_text_index"}, inplace=True)
            output_d[seq] = sen_df.merge(review_df)[
                ["pr_review_sk", "pr_item_sk", "company_name", "review_sentence"]
            ]
    del token_d, prediction_d

    output_df = cudf.concat([o_df for o_df in output_d.values()])
    # Rename to the benchmark's expected output schema.
    output_df.rename(
        columns={"pr_review_sk": "review_sk", "pr_item_sk": "item_sk"}, inplace=True
    )
    w_et = time.time()
    logging.warning("Single part took = {}".format(w_et - w_st))
    return output_df.drop_duplicates()
def main(client, config):
    """Q27 (HuggingFace): find competitor mentions in one product's reviews.

    Filters the reviews to ``q27_pr_item_sk`` and maps the end-to-end
    BERT NER workflow (:func:`run_single_part_workflow`) over each
    partition. Returns the combined (review_sk, item_sk, company_name,
    review_sentence) dataframe.
    """
    import cudf

    # Fine-tuned model directory is expected next to the data root.
    model_path = os.path.join(config["data_dir"], "../../q27_model_dir")
    product_reviews_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
        dask_profile=config["dask_profile"],
    )
    product_reviews_df = product_reviews_df[
        product_reviews_df.pr_item_sk == q27_pr_item_sk
    ].persist()

    # Explicit meta frame so map_partitions knows the output schema.
    meta_d = {
        "review_sk": np.ones(1, dtype=np.int64),
        "item_sk": np.ones(1, dtype=np.int64),
        "company_name": "",
        "review_sentence": "",
    }
    meta_df = cudf.DataFrame(meta_d)

    output_df = product_reviews_df.map_partitions(
        run_single_part_workflow, model_path, meta=meta_df
    )
    output_df = output_df.persist()
    wait(output_df)
    # Free the per-worker cached model before returning.
    client.run(del_model_attribute)
    return output_df
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    import cudf
    import dask_cudf

    # Attach to the cluster, re-initialize RMM with a large pool on every
    # worker (the BERT inference is allocation heavy), then run the query.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    client.run(rmm.reinitialize, pool_allocator=True, initial_pool_size=14e9)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q27/README.md | # Query 27
## Query Details:
In this query, we find "competitor" company names in the product reviews for a given product.
The final output is review id, product id, "competitor’s" company name and the related sentence from the online review.
We have two implementations for this query:
## 1. [HuggingFace Implementation](gpu_bdb_query_hf_27.py)
This implementation uses [HuggingFace's](https://huggingface.co/) [token-classification](https://github.com/huggingface/transformers/tree/master/examples/token-classification) to do NER. We suggest choosing between the following models for optimal speed and accuracy.
1. [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) (2.5x Spacy Implementation, `89.6 F1` on conll-2003)
2. [bert-base-NER](https://huggingface.co/dslim/bert-base-NER) (1.7x Spacy Implementation, `91.95 F1` on conll-2003)
### Setup:
#### 1. Distilbert-base-cased
To use `distil-bert-cased` model:
a. Download it from https://huggingface.co/distilbert-base-cased
b. Fine-tune the model on [conll-2003](https://www.clips.uantwerpen.be/conll2003/ner/) by following HuggingFace's example at [link](https://huggingface.co/transformers/v2.4.0/examples.html#named-entity-recognition)
c. Place it on your shared directory `data_dir` +`../../q27_hf_model`
**Commands Used:**
```CUDA_VISIBLE_DEVICES=0 python run_ner.py config-distilbert.json```
**config-distilbert.json**
```json
{
"data_dir": "./data",
"labels": "./data/labels.txt",
"model_name_or_path": "distilbert-base-cased",
"output_dir": "distilbert-base-cased",
"max_seq_length": 512,
"num_train_epochs": 5,
"per_device_train_batch_size": 16,
"save_steps": 878,
"seed": 1,
"do_train": true,
"do_eval": true,
"do_predict": true,
"--fp16": true
}
```
#### 2. Bert-base-ner
a. Download it from https://huggingface.co/dslim/bert-base-NER
b. Place it on your shared directory `data_dir` +`../../q27_hf_model`
## 2. [spaCy Implementation](gpu_bdb_query_27.py)
This implementation relies on spaCy's [entityrecognizer](https://spacy.io/api/entityrecognizer ) model.
Download the spaCy model via :
```
python -m spacy download en_core_web_sm
```
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q27/gpu_bdb_query_27_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from bdb_tools.cluster_startup import attach_to_cluster
from dask.distributed import wait
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q27_utils import (
ner_parser,
q27_pr_item_sk,
EOL_CHAR,
read_tables
)
def main(data_dir, client, c, config):
    """Q27 (dask-sql): find competitor (ORG) mentions in one product's reviews.

    Splits the reviews into sentences, runs spaCy NER over them, explodes
    the found company names to one row each, and joins names back to their
    source sentences. Returns (review_sk, item_sk, company_name,
    review_sentence) ordered rows.
    """
    benchmark(read_tables, config, c)
    import dask_cudf

    query = f"""
        SELECT pr_review_sk, pr_item_sk, pr_review_content
        FROM product_reviews
        WHERE pr_item_sk = {q27_pr_item_sk}
    """
    product_reviews_df = c.sql(query)
    sentences = product_reviews_df.map_partitions(
        create_sentences_from_reviews,
        review_column="pr_review_content",
        end_of_line_char=EOL_CHAR,
    )
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    del product_reviews_df

    # Do the NER on CPU via a plain dask dataframe (spaCy pipeline), then
    # move the result back to GPU.
    sentences = sentences.to_dask_dataframe()
    ner_parsed = sentences.map_partitions(ner_parser, "sentence")
    ner_parsed = dask_cudf.from_dask_dataframe(ner_parsed)
    ner_parsed = ner_parsed.persist()
    wait(ner_parsed)

    # Keep only sentences where at least one company name was found.
    ner_parsed = ner_parsed[ner_parsed.company_name_list != ""]

    # separate NER results into one row per found company
    # NOTE(review): "é" is presumably chosen as a delimiter unlikely to
    # occur in company names — confirm against the data.
    repeated_names = ner_parsed.map_partitions(
        create_words_from_sentences,
        sentence_column="company_name_list",
        global_position_column="sentence_tokenized_global_pos",
        delimiter="é",
    )
    del sentences

    # recombine: join each exploded company name back to its sentence.
    repeated_names = repeated_names.persist()
    wait(repeated_names)
    c.create_table('repeated_names', repeated_names, persist=False)

    ner_parsed = ner_parsed.persist()
    wait(ner_parsed)
    c.create_table('ner_parsed', ner_parsed, persist=False)

    query = f"""
        SELECT review_idx_global_pos as review_sk,
            CAST({q27_pr_item_sk} AS BIGINT) as item_sk,
            word as company_name,
            sentence as review_sentence
        FROM repeated_names left join ner_parsed
        ON sentence_idx_global_pos = sentence_tokenized_global_pos
        ORDER BY review_idx_global_pos, item_sk, word, sentence
    """
    recombined = c.sql(query)

    # Drop the temporary tables and intermediates before returning.
    c.drop_table("repeated_names")
    c.drop_table("ner_parsed")
    del ner_parsed
    del repeated_names
    return recombined
@annotate("QUERY27", color="green", domain="gpu-bdb")
def start_run():
    """Attach to the cluster with a dask-sql context and run Q27.

    The nvtx ``annotate`` decorator marks the whole run for GPU profilers.
    """
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)

if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q27/gpu_bdb_query_27.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from bdb_tools.q27_utils import (
ner_parser,
q27_pr_item_sk,
EOL_CHAR,
read_tables
)
from dask.distributed import wait
def main(client, config):
    """Q27 (pure Dask): find competitor (ORG) mentions in one product's reviews.

    Splits the reviews for ``q27_pr_item_sk`` into sentences, runs spaCy
    NER over them, explodes the found company names to one row each, and
    merges the names back onto their source sentences. Returns sorted
    (review_sk, item_sk, company_name, review_sentence) rows.
    """
    product_reviews_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    product_reviews_df = product_reviews_df[
        product_reviews_df.pr_item_sk == q27_pr_item_sk
    ]

    sentences = product_reviews_df.map_partitions(
        create_sentences_from_reviews,
        review_column="pr_review_content",
        end_of_line_char=EOL_CHAR,
    )
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    sentences = sentences.persist()
    wait(sentences)

    # Do the NER on CPU via a plain dask dataframe (spaCy pipeline), then
    # move the result back to GPU.
    sentences = sentences.to_dask_dataframe()
    ner_parsed = sentences.map_partitions(ner_parser, "sentence")
    ner_parsed = dask_cudf.from_dask_dataframe(ner_parsed)
    ner_parsed = ner_parsed.persist()
    wait(ner_parsed)

    # Keep only sentences where at least one company name was found.
    ner_parsed = ner_parsed[ner_parsed.company_name_list != ""]

    # separate NER results into one row per found company
    # NOTE(review): "é" is presumably chosen as a delimiter unlikely to
    # occur in company names — confirm against the data.
    repeated_names = ner_parsed.map_partitions(
        create_words_from_sentences,
        sentence_column="company_name_list",
        global_position_column="sentence_tokenized_global_pos",
        delimiter="é",
    )

    # recombine: join each exploded company name back to its sentence.
    recombined = repeated_names.merge(
        ner_parsed,
        how="left",
        left_on="sentence_idx_global_pos",
        right_on="sentence_tokenized_global_pos",
    )
    recombined["pr_item_sk"] = q27_pr_item_sk
    recombined = recombined[["review_idx_global_pos", "pr_item_sk", "word", "sentence"]]
    recombined = recombined.persist()
    wait(recombined)

    recombined = recombined.sort_values(
        ["review_idx_global_pos", "pr_item_sk", "word", "sentence"]
    ).persist()
    # Rename to the benchmark's expected output schema.
    recombined.columns = ["review_sk", "item_sk", "company_name", "review_sentence"]
    recombined = recombined.persist()
    wait(recombined)
    return recombined
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    # Parse the benchmark configuration, attach to the running Dask cluster,
    # and execute the query through the shared harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q23/gpu_bdb_query_23_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q23 -----------
# Query parameters: inventory is examined for q23_month and the following
# month of q23_year; q23_coefficient is the minimum coefficient of variation
# (stddev / mean) for a warehouse/item/month group to be reported.
q23_year = 2001
q23_month = 1
q23_coefficient = 1.3
def read_tables(data_dir, bc):
    """Register the parquet-backed tables required by Q23 with the SQL context.

    Creates the 'inventory' and 'date_dim' tables from the corresponding
    parquet directories under ``data_dir``.
    """
    for table_name in ("inventory", "date_dim"):
        bc.create_table(table_name, os.path.join(data_dir, f"{table_name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run TPCx-BB Q23 via BlazingSQL.

    Flags warehouse/item pairs whose monthly inventory quantity has a high
    coefficient of variation (stddev / mean >= q23_coefficient) in two
    consecutive months, and pairs both months' values per warehouse/item.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # Step 1: inventory rows for the target year, restricted to the two
    # months of interest.
    query_1 = f"""
        SELECT inv_warehouse_sk,
            inv_item_sk,
            inv_quantity_on_hand,
            d_moy
        FROM inventory inv
        INNER JOIN date_dim d ON inv.inv_date_sk = d.d_date_sk
        AND d.d_year = {q23_year}
        AND d_moy between {q23_month} AND {q23_month + 1}
    """
    inv_dates_result = bc.sql(query_1)
    # Persist so later queries against 'inv_dates' do not recompute the join.
    inv_dates_result = inv_dates_result.persist()
    wait(inv_dates_result)
    bc.create_table('inv_dates', inv_dates_result)
    # Step 2: mean on-hand quantity per warehouse/item/month.
    query_2 = """
        SELECT inv_warehouse_sk,
            inv_item_sk,
            d_moy,
            AVG(CAST(inv_quantity_on_hand AS DOUBLE)) AS q_mean
        FROM inv_dates
        GROUP BY inv_warehouse_sk, inv_item_sk, d_moy
    """
    mean_result = bc.sql(query_2)
    mean_result = mean_result.persist()
    wait(mean_result)
    bc.create_table('mean_df', mean_result)
    # Step 3: sample standard deviation computed explicitly against the
    # per-group mean (sum of squared deviations over n - 1); groups with a
    # non-positive mean are excluded by the md.q_mean > 0.0 join predicate.
    query_3 = """
        SELECT id.inv_warehouse_sk,
            id.inv_item_sk,
            id.d_moy,
            md.q_mean,
            SQRT( SUM( (id.inv_quantity_on_hand - md.q_mean) * (id.inv_quantity_on_hand - md.q_mean) )
            / (COUNT(id.inv_quantity_on_hand) - 1.0)) AS q_std
        FROM mean_df md
        INNER JOIN inv_dates id ON id.inv_warehouse_sk = md.inv_warehouse_sk
        AND id.inv_item_sk = md.inv_item_sk
        AND id.d_moy = md.d_moy
        AND md.q_mean > 0.0
        GROUP BY id.inv_warehouse_sk, id.inv_item_sk, id.d_moy, md.q_mean
    """
    std_result = bc.sql(query_3)
    # Free intermediate tables/frames as soon as they are no longer needed.
    bc.drop_table("inv_dates")
    del inv_dates_result
    bc.drop_table("mean_df")
    del mean_result
    std_result = std_result.persist()
    wait(std_result)
    bc.create_table('iteration', std_result)
    # Step 4: keep only rows whose coefficient of variation reaches the
    # q23_coefficient threshold.
    query_4 = f"""
        SELECT inv_warehouse_sk,
            inv_item_sk,
            d_moy,
            q_std / q_mean AS qty_cov
        FROM iteration
        WHERE (q_std / q_mean) >= {q23_coefficient}
    """
    std_result = bc.sql(query_4)
    bc.drop_table("iteration")
    std_result = std_result.persist()
    wait(std_result)
    bc.create_table('temp_table', std_result)
    # Step 5: self-join pairs month q23_month with month q23_month + 1 for
    # the same warehouse/item, yielding both months' coefficients in one row.
    query = f"""
        SELECT inv1.inv_warehouse_sk,
            inv1.inv_item_sk,
            inv1.d_moy,
            inv1.qty_cov AS cov,
            inv2.d_moy AS inv2_d_moy,
            inv2.qty_cov AS inv2_cov
        FROM temp_table inv1
        INNER JOIN temp_table inv2 ON inv1.inv_warehouse_sk = inv2.inv_warehouse_sk
        AND inv1.inv_item_sk = inv2.inv_item_sk
        AND inv1.d_moy = {q23_month}
        AND inv2.d_moy = {q23_month + 1}
        ORDER BY inv1.inv_warehouse_sk,
        inv1.inv_item_sk
    """
    result = bc.sql(query)
    bc.drop_table("temp_table")
    return result
if __name__ == "__main__":
    # BlazingSQL variant: attach_to_cluster also constructs the
    # BlazingContext (bc) that main() issues SQL against.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q23/gpu_bdb_query_23.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q23_utils import (
q23_year,
q23_month,
q23_coefficient,
read_tables
)
from distributed import wait
def get_iteration1(merged_inv_dates, n_workers):
    """Compute per warehouse/item/month inventory statistics and keep the
    volatile groups.

    Aggregates mean and sample std of inv_quantity_on_hand, derives the
    coefficient of variation (qty_cov = std / mean) for groups with a
    positive mean, and returns only rows with qty_cov >= q23_coefficient.
    """
    stats = (
        merged_inv_dates
        .groupby(["inv_warehouse_sk", "inv_item_sk", "d_moy"])
        .agg({"inv_quantity_on_hand": ["mean", "std"]}, split_out=n_workers)
    )
    stats.columns = ["qty_mean", "qty_std"]
    stats = stats.reset_index(drop=False)

    # Zero-mean groups would divide by zero below; drop them first.
    stats = stats[stats["qty_mean"] > 0].reset_index(drop=True)
    stats["qty_cov"] = stats["qty_std"] / stats["qty_mean"]

    return stats.query(f"qty_cov >= {q23_coefficient}").reset_index(drop=True)
def main(client, config):
    """Run TPCx-BB Q23 with dask-cuDF dataframes (pure-dataframe variant)."""
    date_dim_df, inv_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    ) if False else benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    # Parse CLI args, attach to the Dask cluster, and run main() through the
    # shared benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q23/gpu_bdb_query_23_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q23_utils import (
q23_year,
q23_month,
q23_coefficient,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run TPCx-BB Q23 through dask-sql.

    Flags warehouse/item pairs whose inventory quantity has a coefficient of
    variation >= q23_coefficient in two consecutive months and reports both
    months side by side.
    """
    benchmark(read_tables, config, c)
    # Inventory rows for the target year, restricted to the two months of
    # interest.
    query_1 = f"""
        SELECT inv_warehouse_sk,
            inv_item_sk,
            inv_quantity_on_hand,
            d_moy
        FROM inventory inv
        INNER JOIN date_dim d ON inv.inv_date_sk = d.d_date_sk
        AND d.d_year = {q23_year}
        AND d_moy between {q23_month} AND {q23_month + 1}
    """
    inv_dates_result = c.sql(query_1)
    c.create_table('inv_dates', inv_dates_result, persist=False)
    # Mean and sample standard deviation per warehouse/item/month; dask-sql
    # provides stddev_samp directly, unlike the BlazingSQL variant which
    # computes it by hand.
    query_2 = """
        SELECT inv_warehouse_sk,
            inv_item_sk,
            d_moy,
            AVG(CAST(inv_quantity_on_hand AS DOUBLE)) AS q_mean,
            stddev_samp(CAST(inv_quantity_on_hand as DOUBLE)) AS q_std
        FROM inv_dates
        GROUP BY inv_warehouse_sk, inv_item_sk, d_moy
    """
    iteration_1 = c.sql(query_2)
    c.create_table('iteration_1', iteration_1, persist=False)
    # Keep only rows whose coefficient of variation reaches the threshold.
    query_3 = f"""
        SELECT inv_warehouse_sk,
            inv_item_sk,
            d_moy,
            q_std / q_mean AS qty_cov
        FROM iteration_1
        WHERE (q_std / q_mean) >= {q23_coefficient}
    """
    iteration_2 = c.sql(query_3)
    c.create_table('temp_table', iteration_2, persist=False)
    # Self-join pairs month q23_month with month q23_month + 1 for the same
    # warehouse/item, yielding both months' coefficients in one row.
    query = f"""
        SELECT inv1.inv_warehouse_sk,
            inv1.inv_item_sk,
            inv1.d_moy,
            inv1.qty_cov AS cov,
            inv2.d_moy AS inv2_d_moy,
            inv2.qty_cov AS inv2_cov
        FROM temp_table inv1
        INNER JOIN temp_table inv2 ON inv1.inv_warehouse_sk = inv2.inv_warehouse_sk
        AND inv1.inv_item_sk = inv2.inv_item_sk
        AND inv1.d_moy = {q23_month}
        AND inv2.d_moy = {q23_month + 1}
        ORDER BY inv1.inv_warehouse_sk,
        inv1.inv_item_sk
    """
    result = c.sql(query)
    result = result.persist()
    wait(result)
    # NOTE(review): only 'temp_table' is dropped here; 'inv_dates' and
    # 'iteration_1' remain registered on the context — confirm whether that
    # is intentional (they are persist=False, so no data is pinned).
    c.drop_table("temp_table")
    return result
@annotate("QUERY23", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI arguments, attach to the Dask cluster with a dask-sql
    context, and execute query 23 under an NVTX profiling range."""
    cfg = gpubdb_argparser()
    dask_client, sql_ctx = attach_to_cluster(cfg, create_sql_context=True)
    run_query(config=cfg, client=dask_client, query_func=main, sql_context=sql_ctx)
if __name__ == "__main__":
    # Entry point is wrapped in start_run so the NVTX annotation covers the
    # whole query execution.
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q19/README.md | # Query 19
In this query, we retrieve the items with the highest number of returns where the number of returns was approximately equivalent across all store and web channels (within a tolerance of +/- 10%), within the weeks ending on the given dates.
We then analyse the online reviews for these items to see if there are any negative reviews.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q19/gpu_bdb_query_19.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import create_sentences_from_reviews, create_words_from_sentences
from bdb_tools.q19_utils import (
q19_returns_dates_IN,
eol_char,
read_tables
)
from dask.distributed import wait
def main(client, config):
    """Run TPCx-BB Q19.

    Finds items whose returned quantities in the target weeks are roughly
    equal (within +/-10%) across the store and web channels, then scans
    their product reviews and emits every sentence that contains a word
    from the negative-sentiment lexicon.
    """
    date_dim_df, store_returns_df, web_returns_df, product_reviews_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # filter date table
    # Self-join on d_week_seq: keep every date whose week contains one of
    # the configured "week ending" dates in q19_returns_dates_IN.
    date_dim_df = date_dim_df.merge(
        date_dim_df, on=["d_week_seq"], how="outer", suffixes=("", "_r")
    )
    date_dim_df = date_dim_df[date_dim_df.d_date_r.isin(q19_returns_dates_IN)].reset_index(
        drop=True
    )
    date_dim_df = date_dim_df[["d_date_sk"]].drop_duplicates()
    # Total store-channel return quantity per item over the selected dates.
    sr_merged_df = store_returns_df.merge(
        date_dim_df,
        left_on=["sr_returned_date_sk"],
        right_on=["d_date_sk"],
        how="inner",
    )
    sr_merged_df = sr_merged_df[["sr_item_sk", "sr_return_quantity"]]
    sr_grouped_df = (
        sr_merged_df.groupby(["sr_item_sk"])
        .agg({"sr_return_quantity": "sum"})
        .reset_index()
        .rename(columns={"sr_return_quantity": "sr_item_qty"})
    )
    sr_grouped_df = sr_grouped_df[sr_grouped_df["sr_item_qty"] > 0]
    # Total web-channel return quantity per item over the selected dates.
    wr_merged_df = web_returns_df.merge(
        date_dim_df,
        left_on=["wr_returned_date_sk"],
        right_on=["d_date_sk"],
        how="inner",
    )
    wr_merged_df = wr_merged_df[["wr_item_sk", "wr_return_quantity"]]
    wr_grouped_df = (
        wr_merged_df.groupby(["wr_item_sk"])
        .agg({"wr_return_quantity": "sum"})
        .reset_index()
        .rename(columns={"wr_return_quantity": "wr_item_qty"})
    )
    wr_grouped_df = wr_grouped_df[wr_grouped_df["wr_item_qty"] > 0].reset_index(
        drop=True
    )
    # Items returned through both channels.
    sr_wr_merged_df = sr_grouped_df.merge(
        wr_grouped_df, left_on=["sr_item_sk"], right_on=["wr_item_sk"], how="inner"
    )
    sr_wr_merged_df = sr_wr_merged_df[["sr_item_sk", "sr_item_qty", "wr_item_qty"]]
    # Reviews without content cannot contribute sentences.
    product_reviews_df = product_reviews_df[
        ~product_reviews_df.pr_review_content.isnull()
    ].reset_index(drop=True)
    # Align key dtypes before the join.
    product_reviews_df["pr_item_sk"] = product_reviews_df["pr_item_sk"].astype("int32")
    sr_wr_merged_df["sr_item_sk"] = sr_wr_merged_df["sr_item_sk"].astype("int32")
    merged_df = product_reviews_df.merge(
        sr_wr_merged_df, left_on=["pr_item_sk"], right_on=["sr_item_sk"], how="inner"
    )
    cols_keep = [
        "pr_item_sk",
        "pr_review_content",
        "pr_review_sk",
        "sr_item_qty",
        "wr_item_qty",
    ]
    merged_df = merged_df[cols_keep]
    # Channel-parity test: relative difference between the two channels'
    # quantities (measured against their average) must be within 10%.
    merged_df["tolerance_flag"] = (
        (merged_df["sr_item_qty"] - merged_df["wr_item_qty"])
        / ((merged_df["sr_item_qty"] + merged_df["wr_item_qty"]) / 2)
    ).abs() <= 0.1
    merged_df = merged_df[merged_df["tolerance_flag"] == True].reset_index(drop=True)
    merged_df = merged_df[["pr_item_sk", "pr_review_content", "pr_review_sk"]]
    # Normalize case and mark sentence boundaries with eol_char so the text
    # helpers can split reviews into sentences.
    # NOTE(review): the list-valued pat/repl form of str.replace is a cuDF
    # extension (each of ".", "?", "!" maps to eol_char) — confirm against
    # the cuDF version in use.
    merged_df["pr_review_content"] = merged_df.pr_review_content.str.lower()
    merged_df["pr_review_content"] = merged_df.pr_review_content.str.replace(
        [".", "?", "!"], [eol_char], regex=False
    )
    sentences = merged_df.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # This file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    with open(os.path.join(sentiment_dir, "negativeSentiment.txt")) as fh:
        negativeSentiment = list(map(str.strip, fh.readlines()))
        # dedupe for one extra record in the source file
        negativeSentiment = list(set(negativeSentiment))
    sent_df = cudf.DataFrame({"word": negativeSentiment})
    sent_df["sentiment"] = "NEG"
    sent_df = dask_cudf.from_cudf(sent_df, npartitions=1)
    # Keep only words that appear in the negative-sentiment lexicon.
    word_sentence_sentiment = word_df.merge(sent_df, how="inner", on="word")
    merged_df["pr_review_sk"] = merged_df["pr_review_sk"].astype("int32")
    # Attach the full sentence text back to each matched word.
    temp = word_sentence_sentiment.merge(
        sentences,
        how="left",
        left_on="sentence_idx_global_pos",
        right_on="sentence_tokenized_global_pos",
    )
    temp = temp[["review_idx_global_pos", "word", "sentiment", "sentence"]]
    merged_df = merged_df[["pr_item_sk", "pr_review_sk"]]
    # Map each sentence back to its item via the review key.
    final = temp.merge(
        merged_df, how="inner", left_on="review_idx_global_pos", right_on="pr_review_sk"
    )
    final = final.rename(
        columns={
            "pr_item_sk": "item_sk",
            "sentence": "review_sentence",
            "word": "sentiment_word",
        }
    )
    keepcols = ["item_sk", "review_sentence", "sentiment", "sentiment_word"]
    final = final[keepcols]
    final = final.persist()
    # Deterministic ordering for benchmark result validation.
    final = final.sort_values(by=keepcols)
    wait(final)
    return final
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    # Parse CLI args, attach to the Dask cluster, and run main() through the
    # shared benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.