repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q19/gpu_bdb_query_19_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from dask.distributed import wait
# -------- Q19 -----------
q19_returns_dates_IN = ["2004-03-08", "2004-08-02", "2004-11-15", "2004-12-20"]
eol_char = "è"
def read_tables(data_dir, bc):
    """Register the four parquet tables needed by Q19 with the BlazingSQL context.

    Each table named ``t`` is backed by the glob ``<data_dir>/t/*.parquet``.
    """
    for table_name in ("web_returns", "date_dim", "product_reviews", "store_returns"):
        bc.create_table(table_name, os.path.join(data_dir, f"{table_name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 19 (BlazingSQL variant).

    Step 1 (SQL): find items whose store-return and web-return quantities, in
    the weeks ending the four dates of ``q19_returns_dates_IN``, differ by at
    most 10%, and pull the review text for those items.
    Step 2 (dataframe): split the reviews into sentences and words, then join
    the words against a negative-sentiment word list (SQL again) to emit the
    flagged review sentences.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = f"""
        WITH dateFilter AS
        (
            -- within the week ending a given date
            SELECT d1.d_date_sk
            FROM date_dim d1, date_dim d2
            WHERE d1.d_week_seq = d2.d_week_seq
            AND CAST(d2.d_date AS DATE) IN (DATE '{q19_returns_dates_IN[0]}',
                                            DATE '{q19_returns_dates_IN[1]}',
                                            DATE '{q19_returns_dates_IN[2]}',
                                            DATE '{q19_returns_dates_IN[3]}')
        ), fsr AS
        (
            --store returns in week ending given date
            SELECT sr_item_sk, SUM(sr_return_quantity) sr_item_qty
            FROM store_returns sr
            INNER JOIN dateFilter d
            ON sr.sr_returned_date_sk = d.d_date_sk
            GROUP BY sr_item_sk --across all store and web channels
            HAVING SUM(sr_return_quantity) > 0
        ), fwr AS
        (
            --web returns in week ending given date
            SELECT wr_item_sk, SUM(wr_return_quantity) wr_item_qty
            FROM web_returns wr
            INNER JOIN dateFilter d
            ON wr.wr_returned_date_sk = d_date_sk
            GROUP BY wr_item_sk --across all store and web channels
            HAVING SUM(wr_return_quantity) > 0
        ), extract_sentiment AS
        (
            SELECT pr.pr_item_sk, pr.pr_review_content, pr.pr_review_sk
            FROM product_reviews pr
            INNER JOIN fsr
            ON pr.pr_item_sk = fsr.sr_item_sk
            INNER JOIN fwr
            ON fsr.sr_item_sk = fwr.wr_item_sk
            WHERE pr.pr_review_content IS NOT NULL ---- add as rapids
            AND abs( CAST((sr_item_qty-wr_item_qty) AS DOUBLE) /
                ((sr_item_qty + wr_item_qty)/2) ) <= 0.1
        )
        SELECT * FROM extract_sentiment
        ORDER BY pr_item_sk, pr_review_content, pr_review_sk
    """
    merged_df = bc.sql(query)

    # second step -- Sentiment Word Extraction
    merged_df["pr_review_sk"] = merged_df["pr_review_sk"].astype("int32")
    merged_df["pr_review_content"] = merged_df.pr_review_content.str.lower()
    # Normalize the sentence terminators to the single sentinel character
    # eol_char so the tokenizer can split reviews on one delimiter.
    merged_df["pr_review_content"] = merged_df.pr_review_content.str.replace(
        [".", "?", "!"], [eol_char], regex=False
    )
    sentences = merged_df.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences['sentence_tokenized_global_pos'] = sentences['x'].cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # This txt file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    # Need to pass the absolute path for this txt file
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    bc.create_table('sent_df',
                    os.path.join(sentiment_dir, "negativeSentiment.txt"),
                    names=['sentiment_word'],
                    dtype=['str'],
                    file_format="csv")
    # Persist and register the intermediate frames so the second SQL query can
    # reference them as tables.
    sentences = sentences.persist()
    wait(sentences)
    bc.create_table('sentences_df', sentences)
    word_df = word_df.persist()
    wait(word_df)
    bc.create_table('word_df', word_df)
    merged_df = merged_df.persist()
    wait(merged_df)
    bc.create_table('merged_df', merged_df)
    query = """
        WITH negativesent AS
        (
            SELECT distinct sentiment_word
            FROM sent_df
        ), word_sentence_sentiment AS
        (
            SELECT sd.sentiment_word,
                wd.sentence_idx_global_pos
            FROM word_df wd
            INNER JOIN negativesent sd ON wd.word = sd.sentiment_word
        ), temp AS
        (
            SELECT s.review_idx_global_pos,
                w.sentiment_word,
                s.sentence
            FROM word_sentence_sentiment w
            LEFT JOIN sentences_df s
            ON w.sentence_idx_global_pos = s.sentence_tokenized_global_pos
        )
        SELECT pr_item_sk AS item_sk,
            sentence AS review_sentence,
            'NEG' AS sentiment,
            sentiment_word
        FROM temp
        INNER JOIN merged_df ON pr_review_sk = review_idx_global_pos
        ORDER BY pr_item_sk, review_sentence, sentiment_word
    """
    result = bc.sql(query)
    # Drop the temporary tables and release the backing dataframes.
    bc.drop_table("sentences_df")
    del sentences
    bc.drop_table("word_df")
    del word_df
    bc.drop_table("merged_df")
    del merged_df
    return result
if __name__ == "__main__":
    # Entry point: build a BlazingSQL context on the shared Dask cluster and
    # run the query through the common benchmarking harness.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q19/gpu_bdb_query_19_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import os
import dask.dataframe as dd
import dask_cudf
import numpy as np
import pandas as pd
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences,
)
from bdb_tools.q19_utils import (
q19_returns_dates_IN,
eol_char,
read_tables,
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run gpu-bdb query 19 (dask-sql variant).

    Step 1 (SQL): find items whose store-return and web-return quantities, in
    the weeks ending the four dates of ``q19_returns_dates_IN``, differ by at
    most 10%, and pull the review text for those items.
    Step 2 (dataframe): split the reviews into sentences and words, then join
    the words against a negative-sentiment word list (SQL again) to emit the
    flagged review sentences.
    """
    benchmark(read_tables, config, c)
    query = f"""
        WITH dateFilter AS
        (
            -- within the week ending a given date
            SELECT d1.d_date_sk
            FROM date_dim d1, date_dim d2
            WHERE d1.d_week_seq = d2.d_week_seq
            AND CAST(d2.d_date AS DATE) IN (DATE '{q19_returns_dates_IN[0]}',
                                            DATE '{q19_returns_dates_IN[1]}',
                                            DATE '{q19_returns_dates_IN[2]}',
                                            DATE '{q19_returns_dates_IN[3]}')
        ), fsr AS
        (
            --store returns in week ending given date
            SELECT sr_item_sk, SUM(sr_return_quantity) sr_item_qty
            FROM store_returns sr
            INNER JOIN dateFilter d
            ON sr.sr_returned_date_sk = d.d_date_sk
            GROUP BY sr_item_sk --across all store and web channels
            HAVING SUM(sr_return_quantity) > 0
        ), fwr AS
        (
            --web returns in week ending given date
            SELECT wr_item_sk, SUM(wr_return_quantity) wr_item_qty
            FROM web_returns wr
            INNER JOIN dateFilter d
            ON wr.wr_returned_date_sk = d_date_sk
            GROUP BY wr_item_sk --across all store and web channels
            HAVING SUM(wr_return_quantity) > 0
        ), extract_sentiment AS
        (
            SELECT pr.pr_item_sk, pr.pr_review_content, pr.pr_review_sk
            FROM product_reviews pr
            INNER JOIN fsr
            ON pr.pr_item_sk = fsr.sr_item_sk
            INNER JOIN fwr
            ON fsr.sr_item_sk = fwr.wr_item_sk
            WHERE pr.pr_review_content IS NOT NULL ---- add as rapids
            AND abs( CAST((sr_item_qty-wr_item_qty) AS DOUBLE) /
                ((sr_item_qty + wr_item_qty)/2) ) <= 0.1
        )
        SELECT * FROM extract_sentiment
        ORDER BY pr_item_sk, pr_review_content, pr_review_sk
    """
    merged_df = c.sql(query)

    # second step -- Sentiment Word Extraction
    merged_df["pr_review_sk"] = merged_df["pr_review_sk"].astype("int32")
    merged_df["pr_review_content"] = merged_df.pr_review_content.str.lower()
    # Normalize each sentence terminator to the single sentinel character
    # eol_char (one replace per character, unlike the cudf list form used by
    # the BlazingSQL variant) so the tokenizer can split on one delimiter.
    for char in [".", "?", "!"]:
        merged_df["pr_review_content"] = merged_df.pr_review_content.str.replace(char, eol_char, regex=False)
    sentences = merged_df.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences['sentence_tokenized_global_pos'] = sentences['x'].cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # This txt file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    # Need to pass the absolute path for this txt file
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    # Read the word list with the reader matching the dataframe backend
    # (GPU via dask_cudf, otherwise plain dask).
    if isinstance(merged_df, dask_cudf.DataFrame):
        ns_df = dask_cudf.read_csv(os.path.join(sentiment_dir, "negativeSentiment.txt"), names=["sentiment_word"])
    else:
        ns_df = dd.read_csv(os.path.join(sentiment_dir, "negativeSentiment.txt"), names=["sentiment_word"])
    c.create_table('sent_df', ns_df, persist=False)
    # Persist and register the intermediate frames so the second SQL query can
    # reference them as tables.
    sentences = sentences.persist()
    wait(sentences)
    c.create_table('sentences_df', sentences, persist=False)
    word_df = word_df.persist()
    wait(word_df)
    c.create_table('word_df', word_df, persist=False)
    merged_df = merged_df.persist()
    wait(merged_df)
    c.create_table('merged_df', merged_df, persist=False)
    query = """
        WITH negativesent AS
        (
            SELECT distinct sentiment_word
            FROM sent_df
        ), word_sentence_sentiment AS
        (
            SELECT sd.sentiment_word,
                wd.sentence_idx_global_pos
            FROM word_df wd
            INNER JOIN negativesent sd ON wd.word = sd.sentiment_word
        ), temp AS
        (
            SELECT s.review_idx_global_pos,
                w.sentiment_word,
                s.sentence
            FROM word_sentence_sentiment w
            LEFT JOIN sentences_df s
            ON w.sentence_idx_global_pos = s.sentence_tokenized_global_pos
        )
        SELECT pr_item_sk AS item_sk,
            sentence AS review_sentence,
            'NEG' AS sentiment,
            sentiment_word
        FROM temp
        INNER JOIN merged_df ON pr_review_sk = review_idx_global_pos
        ORDER BY pr_item_sk, review_sentence, sentiment_word
    """
    result = c.sql(query)
    # Drop the temporary tables and release the backing dataframes.
    c.drop_table("sentences_df")
    del sentences
    c.drop_table("word_df")
    del word_df
    c.drop_table("merged_df")
    del merged_df
    return result
@annotate("QUERY19", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q12/gpu_bdb_query_12.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import glob
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q12_utils import read_tables
from distributed import wait
import numpy as np
from dask import delayed
### Current Implementation Assumption
# The filtered item table will fit in GPU memory :
# At scale `100` ->non filtered-rows `178,200` -> filtered rows `60,059`
# Extrapolation to scale `1_000_000` ->non filtered-rows `17,820,000` -> filtered rows `6,005,900` (So should scale up)
### These parameters are not used
q12_i_category_IN = ["Books", "Electronics"]
### below was hard coded in the original query
q12_store_sale_sk_start_date = 37134
### Util Functions
def string_filter(df, col_name, col_values):
    """Return the rows of ``df`` whose ``col_name`` equals any of ``col_values``.

    The returned frame's index is reset. An empty ``col_values`` yields an
    empty frame (the original hand-rolled OR chain raised on that input
    because the boolean mask stayed ``None``).

    Parameters
    ----------
    df : DataFrame (cudf / pandas-compatible)
        Frame to filter.
    col_name : str
        Column to test.
    col_values : iterable
        Accepted values for ``col_name``.
    """
    # isin() is the vectorized equivalent of OR-ing one equality comparison
    # per value, done in a single pass.
    return df[df[col_name].isin(list(col_values))].reset_index(drop=True)
def filter_wcs_table(web_clickstreams_fn, filtered_item_df):
    """Read one web_clickstreams parquet file and keep only view events of
    known users, inside the 30-day window, on the filtered items.

    Equivalent SQL::

        SELECT wcs_user_sk, wcs_click_date_sk
        FROM web_clickstreams, item
        WHERE wcs_click_date_sk BETWEEN 37134 AND (37134 + 30)  -- given month
          AND wcs_item_sk = i_item_sk
          AND wcs_user_sk IS NOT NULL
          AND wcs_sales_sk IS NULL  -- only views, not purchases
    """
    columns = [
        "wcs_user_sk",
        "wcs_click_date_sk",
        "wcs_item_sk",
        "wcs_sales_sk",
    ]
    clicks = cudf.read_parquet(web_clickstreams_fn, columns=columns)

    # Known users, view-only events (no associated sale).
    view_mask = clicks["wcs_user_sk"].notnull() & clicks["wcs_sales_sk"].isnull()
    clicks = clicks[view_mask].reset_index(drop=True)

    # Clicks inside the one-month window starting at the hard-coded date key.
    window_start = q12_store_sale_sk_start_date
    in_window = (clicks["wcs_click_date_sk"] >= window_start) & (
        clicks["wcs_click_date_sk"] <= window_start + 30
    )
    clicks = clicks.loc[in_window].reset_index(drop=True)

    # Restrict to clicks on items from the filtered item table.
    clicks = clicks.merge(
        filtered_item_df, left_on=["wcs_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    return clicks[["wcs_user_sk", "wcs_click_date_sk"]]
def filter_ss_table(store_sales_df, filtered_item_df):
    """Keep store sales of known customers inside the 90-day window, restricted
    to the filtered items.

    Equivalent SQL::

        SELECT ss_customer_sk, ss_sold_date_sk
        FROM store_sales, item
        WHERE ss_sold_date_sk BETWEEN 37134 AND (37134 + 90)  -- three months
          AND ss_item_sk = i_item_sk
          AND ss_customer_sk IS NOT NULL
    """
    # Sales that can be attributed to a customer.
    sales = store_sales_df[store_sales_df["ss_customer_sk"].notnull()].reset_index(
        drop=True
    )

    # Sales inside the three-month window starting at the hard-coded date key.
    window_start = q12_store_sale_sk_start_date
    in_window = (sales["ss_sold_date_sk"] >= window_start) & (
        sales["ss_sold_date_sk"] <= window_start + 90
    )
    sales = sales.loc[in_window].reset_index(drop=True)

    # Restrict to sales of items from the filtered item table.
    sales = sales.merge(
        filtered_item_df, left_on=["ss_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    return sales[["ss_customer_sk", "ss_sold_date_sk"]]
def main(client, config):
    """Run gpu-bdb query 12 with dataframe ops: distinct users who viewed a
    filtered-category item online (click with no sale) during a 30-day window
    and then bought in store, strictly after the view, within the 90-day
    window starting the same day."""
    item_df, store_sales_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ### Query 0. Filtering item table
    filtered_item_df = string_filter(item_df, "i_category", q12_i_category_IN)
    ### filtered_item_df is a single partition to allow a nx1 merge using map partitions
    filtered_item_df = filtered_item_df.repartition(npartitions=1)
    filtered_item_df = filtered_item_df.persist()
    wait(filtered_item_df)
    ### Query 1
    # The main idea is that we don't fuse a filtration task with the reading
    # task: doing both at once reads (and spills) the whole table before any
    # filtering, which causes more memory pressure.
    meta_d = {
        "wcs_user_sk": np.ones(1, dtype=np.int64),
        "wcs_click_date_sk": np.ones(1, dtype=np.int64),
    }
    meta_df = cudf.DataFrame(meta_d)
    web_clickstream_flist = glob.glob(os.path.join(config["data_dir"], "web_clickstreams/*.parquet"))
    # One delayed read+filter task per clickstream file.
    task_ls = [
        delayed(filter_wcs_table)(fn, filtered_item_df.to_delayed()[0])
        for fn in web_clickstream_flist
    ]
    filter_wcs_df = dask_cudf.from_delayed(task_ls, meta=meta_df)
    ### Query 2
    # Same idea: filter each store_sales partition as it is processed rather
    # than materializing the unfiltered table first.
    meta_d = {
        "ss_customer_sk": np.ones(1, dtype=store_sales_df["ss_customer_sk"].dtype),
        "ss_sold_date_sk": np.ones(1, dtype=np.int64),
    }
    meta_df = cudf.DataFrame(meta_d)
    filtered_ss_df = store_sales_df.map_partitions(
        filter_ss_table, filtered_item_df.to_delayed()[0], meta=meta_df
    )
    ### Result Query
    ### SELECT DISTINCT wcs_user_sk
    ### ....
    ### webInRange
    ### storeInRange
    ### WHERE wcs_user_sk = ss_customer_sk
    ### AND wcs_click_date_sk < ss_sold_date_sk -- buy AFTER viewed on website
    ### ORDER BY wcs_user_sk
    ### Note: Below brings it down to a single partition
    filter_wcs_df_d = filter_wcs_df.drop_duplicates()
    filter_wcs_df_d = filter_wcs_df_d.persist()
    wait(filter_wcs_df_d)
    filtered_ss_df_d = filtered_ss_df.drop_duplicates()
    filtered_ss_df_d = filtered_ss_df_d.persist()
    wait(filtered_ss_df_d)
    ss_wcs_join = filter_wcs_df_d.merge(
        filtered_ss_df_d, left_on="wcs_user_sk", right_on="ss_customer_sk", how="inner"
    )
    # buy AFTER viewed on website
    ss_wcs_join = ss_wcs_join[
        ss_wcs_join["wcs_click_date_sk"] < ss_wcs_join["ss_sold_date_sk"]
    ]
    ss_wcs_join = ss_wcs_join["wcs_user_sk"]
    ### todo: check performance by replacing with 1 single drop_duplicates call
    ### below decreases memory usage on the single gpu to help with subsequent compute
    ss_wcs_join = ss_wcs_join.map_partitions(lambda sr: sr.drop_duplicates())
    ss_wcs_join = ss_wcs_join.repartition(npartitions=1).persist()
    ss_wcs_join = ss_wcs_join.drop_duplicates().reset_index(drop=True)
    ss_wcs_join = ss_wcs_join.map_partitions(lambda ser: ser.sort_values())
    # todo: check if repartition helps for writing efficiency
    # context: 0.1 seconds on sf-1k
    ss_wcs_join = ss_wcs_join.persist()
    wait(ss_wcs_join)
    return ss_wcs_join.to_frame()
if __name__ == "__main__":
    # Entry point. NOTE: attach_to_cluster is imported here, inside the guard,
    # rather than at module top level.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q12/gpu_bdb_query_12_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# -------- Q12 -----------
q12_i_category_IN = "'Books', 'Electronics'"
def read_tables(data_dir, bc):
    """Register the web_clickstreams, store_sales and item parquet tables with
    the BlazingSQL context; each table ``t`` maps to ``<data_dir>/t/*.parquet``."""
    for table_name in ("web_clickstreams", "store_sales", "item"):
        bc.create_table(table_name, os.path.join(data_dir, f"{table_name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 12 entirely in SQL (BlazingSQL): distinct users who
    viewed a filtered-category item online (click with no sale) in days
    37134-37164 and bought in store within days 37134-37224, with the purchase
    strictly after the view."""
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = f"""
        SELECT DISTINCT wcs_user_sk
        FROM
        (
            SELECT DISTINCT
                wcs_user_sk,
                wcs_click_date_sk
            FROM web_clickstreams, item
            WHERE wcs_click_date_sk BETWEEN 37134 AND 37164
            AND i_category IN ({q12_i_category_IN})
            AND wcs_item_sk = i_item_sk
            AND wcs_user_sk IS NOT NULL
            AND wcs_sales_sk IS NULL
        ) webInRange,
        (
            SELECT DISTINCT
                ss_customer_sk,
                ss_sold_date_sk
            FROM store_sales, item
            WHERE ss_sold_date_sk BETWEEN 37134 AND 37224
            AND i_category IN ({q12_i_category_IN}) -- filter given category
            AND ss_item_sk = i_item_sk
            AND ss_customer_sk IS NOT NULL
        ) storeInRange
        WHERE wcs_user_sk = ss_customer_sk
        AND wcs_click_date_sk < ss_sold_date_sk
        ORDER BY wcs_user_sk
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
    # Entry point: attach to the cluster with a BlazingSQL context and run.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q12/gpu_bdb_query_12_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q12_utils import read_tables
q12_i_category_IN = "'Books', 'Electronics'"
def main(data_dir, client, c, config):
    """Run gpu-bdb query 12 entirely in SQL (dask-sql): distinct users who
    viewed a filtered-category item online (click with no sale) in days
    37134-37164 and bought in store within days 37134-37224, with the purchase
    strictly after the view."""
    benchmark(read_tables, config, c)
    query = f"""
        SELECT DISTINCT wcs_user_sk
        FROM
        (
            SELECT DISTINCT
                wcs_user_sk,
                wcs_click_date_sk
            FROM web_clickstreams, item
            WHERE wcs_click_date_sk BETWEEN 37134 AND 37164
            AND i_category IN ({q12_i_category_IN})
            AND wcs_item_sk = i_item_sk
            AND wcs_user_sk IS NOT NULL
            AND wcs_sales_sk IS NULL
        ) webInRange,
        (
            SELECT DISTINCT
                ss_customer_sk,
                ss_sold_date_sk
            FROM store_sales, item
            WHERE ss_sold_date_sk BETWEEN 37134 AND 37224
            AND i_category IN ({q12_i_category_IN}) -- filter given category
            AND ss_item_sk = i_item_sk
            AND ss_customer_sk IS NOT NULL
        ) storeInRange
        WHERE wcs_user_sk = ss_customer_sk
        AND wcs_click_date_sk < ss_sold_date_sk
        ORDER BY wcs_user_sk
    """
    result = c.sql(query)
    return result
@annotate("QUERY12", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q08/gpu_bdb_query_08.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import glob
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
convert_datestring_to_days
)
from bdb_tools.merge_util import hash_merge
from bdb_tools.q08_utils import (
get_sessions,
get_unique_sales_keys_from_sessions,
prep_for_sessionization,
q08_STARTDATE,
q08_ENDDATE,
read_tables
)
import numpy as np
from distributed import wait
from dask import delayed
def etl_wcs(wcs_fn, filtered_date_df, web_page_df):
    """Read one web_clickstreams parquet file and join it with the filtered
    date_dim and web_page frames.

    Parameters
    ----------
    wcs_fn : str
        Path of a single web_clickstreams parquet file.
    filtered_date_df : cudf.DataFrame
        date_dim rows restricted to the query's date range (joined on
        d_date_sk to filter the clicks).
    web_page_df : cudf.DataFrame
        web_page rows carrying wp_web_page_sk and wp_type_codes.

    Returns
    -------
    cudf.DataFrame
        Columns: wcs_user_sk, tstamp_inSec, wcs_sales_sk, wp_type_codes.
    """
    # (Removed two no-op self-assignments of the dataframe arguments.)
    web_clickstreams_cols = [
        "wcs_user_sk",
        "wcs_click_date_sk",
        "wcs_sales_sk",
        "wcs_web_page_sk",
        "wcs_click_time_sk",
    ]
    web_clickstreams_df = cudf.read_parquet(wcs_fn, columns=web_clickstreams_cols)
    # Drop clicks that cannot be attributed to a user.
    web_clickstreams_df = web_clickstreams_df[
        web_clickstreams_df["wcs_user_sk"].notnull()
    ].reset_index(drop=True)
    # Inner join against the pre-filtered date_dim restricts to the date range.
    merged_df = web_clickstreams_df.merge(
        filtered_date_df,
        right_on=["d_date_sk"],
        left_on=["wcs_click_date_sk"],
        how="inner",
    )
    merged_df = merged_df.merge(
        web_page_df,
        left_on=["wcs_web_page_sk"],
        right_on=["wp_web_page_sk"],
        how="inner",
    )
    # Collapse (day key, time-of-day key) into one timestamp in seconds;
    # 86400 = seconds per day.
    merged_df["tstamp_inSec"] = (
        merged_df["wcs_click_date_sk"] * 86400 + merged_df["wcs_click_time_sk"]
    )
    ### decrease column after merge
    cols_to_keep = ["wcs_user_sk", "tstamp_inSec", "wcs_sales_sk", "wp_type_codes"]
    return merged_df[cols_to_keep]
def reduction_function(df, REVIEW_CAT_CODE):
    """Sessionize one partition of clickstream events and return the sale keys
    of sessions that contain a review page view.

    Assumes the caller already brought every record of each wcs_user_sk into
    the same partition (the main flow shuffles on wcs_user_sk beforehand).
    """
    # category code of review records
    df["review_flag"] = df.wp_type_codes == REVIEW_CAT_CODE
    # set_index in the previous statement will make sure all records of each wcs_user_sk end up in one partition.
    df = prep_for_sessionization(df, review_cat_code=REVIEW_CAT_CODE)
    df = get_sessions(df)
    df = get_unique_sales_keys_from_sessions(df, REVIEW_CAT_CODE)
    # Series -> single-column frame, matching the meta declared by the caller.
    return df.to_frame()
def main(client, config):
    """Run gpu-bdb query 8 with dataframe ops: split the total web-sales
    ws_net_paid between orders whose click session included a review page
    view and all remaining orders, within [q08_STARTDATE, q08_ENDDATE]."""
    (date_dim_df, web_page_df, web_sales_df) = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    date_dim_cov_df = date_dim_df.map_partitions(convert_datestring_to_days)
    # Query bounds as integer day counts, matching the converted d_date column.
    q08_start_dt = np.datetime64(q08_STARTDATE, "D").astype(int)
    q08_end_dt = np.datetime64(q08_ENDDATE, "D").astype(int)
    filtered_date_df = date_dim_cov_df.query(
        f"d_date >= {q08_start_dt} and d_date <= {q08_end_dt}",
        meta=date_dim_cov_df._meta,
    ).reset_index(drop=True)
    # Convert wp_type to categorical and get cat_id of review and dynamic type
    # see https://github.com/rapidsai/cudf/issues/4093 for more info
    web_page_df = web_page_df.persist()
    # map_partitions is a bit faster than ddf[col].astype('category')
    web_page_df["wp_type"] = web_page_df["wp_type"].map_partitions(
        lambda ser: ser.astype("category")
    )
    cpu_categories = web_page_df["wp_type"].compute().cat.categories.to_pandas()
    REVIEW_CAT_CODE = cpu_categories.get_loc("review")
    # cast to minimum viable dtype
    codes_min_signed_type = cudf.utils.dtypes.min_signed_type(len(cpu_categories))
    web_page_df["wp_type_codes"] = web_page_df["wp_type"].cat.codes.astype(
        codes_min_signed_type
    )
    web_page_newcols = ["wp_web_page_sk", "wp_type_codes"]
    web_page_df = web_page_df[web_page_newcols]
    # One delayed read+join task per clickstream file (see etl_wcs).
    web_clickstream_flist = glob.glob(os.path.join(config["data_dir"], "web_clickstreams/*.parquet"))
    task_ls = [
        delayed(etl_wcs)(
            fn, filtered_date_df.to_delayed()[0], web_page_df.to_delayed()[0]
        )
        for fn in web_clickstream_flist
    ]
    meta_d = {
        "wcs_user_sk": np.ones(1, dtype=np.int64),
        "tstamp_inSec": np.ones(1, dtype=np.int64),
        "wcs_sales_sk": np.ones(1, dtype=np.int64),
        "wp_type_codes": np.ones(1, dtype=np.int8),
    }
    meta_df = cudf.DataFrame(meta_d)
    merged_df = dask_cudf.from_delayed(task_ls, meta=meta_df)
    # Bring every event of each user into one partition before sessionizing.
    merged_df = merged_df.shuffle(on=["wcs_user_sk"])
    reviewed_sales = merged_df.map_partitions(
        reduction_function,
        REVIEW_CAT_CODE,
        meta=cudf.DataFrame({"wcs_sales_sk": np.ones(1, dtype=np.int64)}),
    )
    reviewed_sales = reviewed_sales.persist()
    wait(reviewed_sales)
    del merged_df
    all_sales_in_year = filtered_date_df.merge(
        web_sales_df, left_on=["d_date_sk"], right_on=["ws_sold_date_sk"], how="inner"
    )
    all_sales_in_year = all_sales_in_year[["ws_net_paid", "ws_order_number"]]
    all_sales_in_year = all_sales_in_year.persist()
    wait(all_sales_in_year)
    # note: switch to mainline
    # once https://github.com/dask/dask/pull/6066
    # lands
    q08_reviewed_sales = hash_merge(
        lhs=all_sales_in_year,
        rhs=reviewed_sales,
        left_on=["ws_order_number"],
        right_on=["wcs_sales_sk"],
        how="inner",
    )
    q08_reviewed_sales_sum = q08_reviewed_sales["ws_net_paid"].sum()
    q08_all_sales_sum = all_sales_in_year["ws_net_paid"].sum()
    # Submit both sums together so the scheduler can share intermediate work.
    q08_reviewed_sales_sum, q08_all_sales_sum = client.compute(
        [q08_reviewed_sales_sum, q08_all_sales_sum]
    )
    q08_reviewed_sales_sum, q08_all_sales_sum = (
        q08_reviewed_sales_sum.result(),
        q08_all_sales_sum.result(),
    )
    no_q08_review_sales_amount = q08_all_sales_sum - q08_reviewed_sales_sum
    # Assemble the two-column, one-row result frame.
    final_result_df = cudf.DataFrame()
    final_result_df["q08_review_sales_amount"] = [q08_reviewed_sales_sum]
    final_result_df["q08_review_sales_amount"] = final_result_df[
        "q08_review_sales_amount"
    ].astype("int")
    final_result_df["no_q08_review_sales_amount"] = [no_q08_review_sales_amount]
    final_result_df["no_q08_review_sales_amount"] = final_result_df[
        "no_q08_review_sales_amount"
    ].astype("int")
    return final_result_df
if __name__ == "__main__":
    # Entry point. NOTE: attach_to_cluster is imported here, inside the guard,
    # rather than at module top level.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q08/gpu_bdb_query_08_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask_cudf
import pandas as pd
import dask.dataframe as dd
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q08_utils import (
get_sessions,
get_unique_sales_keys_from_sessions,
prep_for_sessionization,
q08_STARTDATE,
q08_ENDDATE,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run gpu-bdb query 8 (dask-sql variant): split total web-sales revenue
    between orders whose click session included a review page view and the
    rest, within the date keys resolved for q08_STARTDATE/q08_ENDDATE."""
    benchmark(read_tables, config, c)
    # Resolve the two boundary dates to their d_date_sk surrogate keys.
    query_1 = f"""
        SELECT d_date_sk
        FROM date_dim
        WHERE CAST(d_date as date) IN (date '{q08_STARTDATE}',
                                       date '{q08_ENDDATE}')
        ORDER BY CAST(d_date as date) asc
    """
    result_dates_sk_filter = c.sql(query_1).compute()
    # because `result_dates_sk_filter` has repetitive index
    result_dates_sk_filter.index = list(range(0, result_dates_sk_filter.shape[0]))
    q08_start_dt = result_dates_sk_filter['d_date_sk'][0]
    q08_end_dt = result_dates_sk_filter['d_date_sk'][1]
    query_aux = """
        SELECT
            wp_web_page_sk,
            wp_type
        FROM web_page
    """
    web_page_df = c.sql(query_aux)
    # cast to minimum viable dtype
    web_page_df["wp_type"] = web_page_df["wp_type"].map_partitions(
        lambda ser: ser.astype("category")
    )
    cpu_categories = web_page_df["wp_type"].compute().cat.categories
    if isinstance(web_page_df, dask_cudf.DataFrame):
        cpu_categories = cpu_categories.to_pandas()
    # Categorical code of the 'review' page type, used later to flag sessions.
    REVIEW_CAT_CODE = cpu_categories.get_loc("review")
    web_page_df["wp_type_codes"] = web_page_df["wp_type"].cat.codes
    web_page_newcols = ["wp_web_page_sk", "wp_type_codes"]
    web_page_df = web_page_df[web_page_newcols]
    web_page_df = web_page_df.persist()
    wait(web_page_df)
    c.create_table('web_page_2', web_page_df, persist=False)
    # 86400 = seconds per day; collapses (day key, time key) into one timestamp.
    query_2 = f"""
        SELECT
            CAST(wcs_user_sk AS INTEGER) AS wcs_user_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec,
            wcs_sales_sk,
            wp_type_codes
        FROM web_clickstreams
        INNER JOIN web_page_2 ON wcs_web_page_sk = wp_web_page_sk
        WHERE wcs_user_sk IS NOT NULL
        AND wcs_click_date_sk BETWEEN {q08_start_dt} AND {q08_end_dt}
        --in the future we want to remove this ORDER BY
        DISTRIBUTE BY wcs_user_sk
    """
    merged_df = c.sql(query_2)
    c.drop_table("web_page_2")
    del web_page_df
    # Bring every event of each user into one partition before sessionizing.
    merged_df = merged_df.shuffle(on=["wcs_user_sk"])
    merged_df["review_flag"] = merged_df.wp_type_codes == REVIEW_CAT_CODE
    prepped = merged_df.map_partitions(
        prep_for_sessionization, review_cat_code=REVIEW_CAT_CODE
    )
    sessionized = prepped.map_partitions(get_sessions)
    unique_review_sales = sessionized.map_partitions(
        get_unique_sales_keys_from_sessions, review_cat_code=REVIEW_CAT_CODE
    )
    # Series/array -> single-column frame, per dataframe backend.
    if isinstance(merged_df, dask_cudf.DataFrame):
        unique_review_sales = unique_review_sales.to_frame()
    else:
        unique_review_sales = dd.from_dask_array(unique_review_sales, columns='wcs_sales_sk').to_frame()
    unique_review_sales = unique_review_sales.persist()
    wait(unique_review_sales)
    c.create_table("reviews", unique_review_sales, persist=False)
    # review_total sums ws_net_paid only for orders matched to a review session.
    last_query = f"""
        SELECT
            CAST(review_total AS BIGINT) AS q08_review_sales_amount,
            CAST(total - review_total AS BIGINT) AS no_q08_review_sales_amount
        FROM
        (
            SELECT
                SUM(ws_net_paid) AS total,
                SUM(CASE when wcs_sales_sk IS NULL THEN 0 ELSE 1 END * ws_net_paid) AS review_total
            FROM web_sales
            LEFT OUTER JOIN reviews ON ws_order_number = wcs_sales_sk
            WHERE ws_sold_date_sk between {q08_start_dt} AND {q08_end_dt}
        )
    """
    result = c.sql(last_query)
    c.drop_table("reviews")
    return result
@annotate("QUERY8", color="green", domain="gpu-bdb")
def start_run():
    """Entry point: parse CLI config, attach to the Dask cluster, run Q8."""
    run_config = gpubdb_argparser()
    dask_client, sql_ctx = attach_to_cluster(run_config, create_sql_context=True)
    run_query(
        config=run_config, client=dask_client, query_func=main, sql_context=sql_ctx
    )


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q08/gpu_bdb_query_08_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
import cupy as cp
import numpy as np
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q8 -----------
# Purchase must happen within this many seconds of a review click to count
# as review-influenced (259200 s = 3 days).
q08_SECONDS_BEFORE_PURCHASE = 259200
# One-year analysis window, inclusive, resolved to date_dim keys in main().
q08_STARTDATE = "2001-09-02"
q08_ENDDATE = "2002-09-02"
# Fallback category code for "review" web pages; main() recomputes the real
# code from the wp_type categorical, which shadows this module default.
REVIEW_CAT_CODE = 6
# Sentinel substituted by fillna for missing sales keys / type codes.
NA_FLAG = 0
def get_session_id_from_session_boundary(session_change_df, last_session_len):
    """Expand per-boundary session start timestamps into one id per click.

    ``session_change_df`` holds only the rows where a new session starts;
    each start timestamp is repeated for the length of its session so the
    returned series lines up 1:1 with the original click rows.
    """
    import cudf

    starts = session_change_df.tstamp_inSec
    # Gap between consecutive boundary positions = length of each session;
    # shift up so every boundary row carries its *own* session length.
    lengths = session_change_df["t_index"].diff().reset_index(drop=True).shift(-1)
    try:
        # The final session's length cannot be derived from a diff — the
        # caller supplies it.
        lengths.iloc[-1] = last_session_len
    except (AssertionError, IndexError):  # IndexError in numba >= 0.48
        lengths = cudf.Series([])
    return cudf.Series(starts).repeat(lengths).reset_index(drop=True)
def get_session_id(df):
    """Create a session id for each click row.

    A new session starts whenever the user changes or the click is a
    review-page hit; the session id is the timestamp of the session's
    first click.
    """
    user_changed = df["wcs_user_sk"].diff(periods=1) != 0
    df["user_change_flag"] = user_changed.fillna(True)
    df["session_change_flag"] = df["review_flag"] | df["user_change_flag"]

    df = df.reset_index(drop=True)
    df["t_index"] = cp.arange(start=0, stop=len(df), dtype=np.int32)

    boundaries = df[df["session_change_flag"]].reset_index(drop=True)
    try:
        tail_len = len(df) - boundaries["t_index"].iloc[-1]
    except (AssertionError, IndexError):  # IndexError in numba >= 0.48
        tail_len = 0

    session_ids = get_session_id_from_session_boundary(boundaries, tail_len)
    assert len(session_ids) == len(df)
    return session_ids
def get_sessions(df):
    """Sort clicks into user/time order and attach a session_id column."""
    order_cols = ["wcs_user_sk", "tstamp_inSec", "wcs_sales_sk", "wp_type_codes"]
    df = df.sort_values(by=order_cols).reset_index(drop=True)
    df["session_id"] = get_session_id(df)
    return df
def get_unique_sales_keys_from_sessions(sessionized, review_cat_code):
    """Return distinct sale keys made within the allowed time window after
    the session's first (review) click, excluding the review rows themselves."""
    elapsed = sessionized.tstamp_inSec - sessionized.session_id
    sessionized["relevant"] = (elapsed <= q08_SECONDS_BEFORE_PURCHASE) & (
        sessionized.wcs_sales_sk != NA_FLAG
    )
    candidates = (
        sessionized.query(f"wcs_sales_sk != {NA_FLAG}")
        .query("relevant == True")
        .query(f"wp_type_codes != {review_cat_code}")
    )
    return candidates.wcs_sales_sk.unique()
def prep_for_sessionization(df, review_cat_code):
    """Keep, per user, only clicks at or after that user's first review click."""
    df = df.fillna(NA_FLAG)
    df = df.sort_values(
        by=["wcs_user_sk", "tstamp_inSec", "wcs_sales_sk", "wp_type_codes"]
    ).reset_index(drop=True)

    reviews_only = df.loc[df["wp_type_codes"] == review_cat_code]
    # Row position of each user's earliest review click — needed to decide
    # whether later activity can be attributed to a review.
    first_review = (
        reviews_only[["wcs_user_sk", "tstamp_inSec"]]
        .drop_duplicates()
        .reset_index()
        .groupby("wcs_user_sk")["index"]
        .min()
        .reset_index()
    )
    first_review.columns = ["wcs_user_sk", "first_review_index"]

    # Keep the pre-join row position so the filter below can compare it.
    with_first = df.reset_index().merge(
        first_review, how="left", on="wcs_user_sk"
    )
    return with_first.query("index >= first_review_index")
def read_tables(data_dir, bc):
    """Register the four Q8 input tables from their parquet directories."""
    for table in ("web_clickstreams", "web_sales", "web_page", "date_dim"):
        bc.create_table(table, os.path.join(data_dir, f"{table}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run TPCx-BB Q8 (BlazingSQL version).

    Compares web sales preceded by a product-review page visit (within
    q08_SECONDS_BEFORE_PURCHASE of the review click) against all other web
    sales in the one-year window [q08_STARTDATE, q08_ENDDATE].
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # Resolve the start/end dates to date_dim surrogate keys.
    query_1 = f"""
        SELECT d_date_sk
        FROM date_dim
        WHERE CAST(d_date as date) IN (date '{q08_STARTDATE}',
                                       date '{q08_ENDDATE}')
        ORDER BY CAST(d_date as date) asc
    """
    result_dates_sk_filter = bc.sql(query_1).compute()
    # because `result_dates_sk_filter` has repetitive index
    result_dates_sk_filter.index = list(range(0, result_dates_sk_filter.shape[0]))
    q08_start_dt = result_dates_sk_filter['d_date_sk'][0]
    q08_end_dt = result_dates_sk_filter['d_date_sk'][1]
    query_aux = """
        SELECT
            wp_web_page_sk,
            wp_type
        FROM web_page
    """
    web_page_df = bc.sql(query_aux)
    # cast to minimum viable dtype
    web_page_df["wp_type"] = web_page_df["wp_type"].map_partitions(
        lambda ser: ser.astype("category")
    )
    cpu_categories = web_page_df["wp_type"].compute().cat.categories.to_pandas()
    # Actual category code of "review" pages (shadows the module default).
    REVIEW_CAT_CODE = cpu_categories.get_loc("review")
    codes_min_signed_type = cudf.utils.dtypes.min_signed_type(len(cpu_categories))
    web_page_df["wp_type_codes"] = web_page_df["wp_type"].cat.codes.astype(
        codes_min_signed_type
    )
    web_page_newcols = ["wp_web_page_sk", "wp_type_codes"]
    web_page_df = web_page_df[web_page_newcols]
    web_page_df = web_page_df.persist()
    wait(web_page_df)
    bc.create_table('web_page_2', web_page_df)
    # Clicks in the window joined to page-type codes, ordered per user for
    # the downstream sessionization.
    query_2 = f"""
        SELECT
            CAST(wcs_user_sk AS INTEGER) AS wcs_user_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec,
            wcs_sales_sk,
            wp_type_codes
        FROM web_clickstreams
        INNER JOIN web_page_2 ON wcs_web_page_sk = wp_web_page_sk
        WHERE wcs_user_sk IS NOT NULL
        AND wcs_click_date_sk BETWEEN {q08_start_dt} AND {q08_end_dt}
        --in the future we want to remove this ORDER BY
        ORDER BY wcs_user_sk
    """
    merged_df = bc.sql(query_2)
    bc.drop_table("web_page_2")
    del web_page_df
    # Co-locate each user's clicks on one partition before sessionizing.
    merged_df = merged_df.shuffle(on=["wcs_user_sk"])
    merged_df["review_flag"] = merged_df.wp_type_codes == REVIEW_CAT_CODE
    prepped = merged_df.map_partitions(
        prep_for_sessionization, review_cat_code=REVIEW_CAT_CODE
    )
    sessionized = prepped.map_partitions(get_sessions)
    unique_review_sales = sessionized.map_partitions(
        get_unique_sales_keys_from_sessions, review_cat_code=REVIEW_CAT_CODE
    )
    unique_review_sales = unique_review_sales.to_frame()
    unique_review_sales = unique_review_sales.persist()
    wait(unique_review_sales)
    bc.create_table("reviews", unique_review_sales)
    # Split total web sales in the window into review-influenced vs. not.
    last_query = f"""
        SELECT
            CAST(review_total AS BIGINT) AS q08_review_sales_amount,
            CAST(total - review_total AS BIGINT) AS no_q08_review_sales_amount
        FROM
        (
            SELECT
            SUM(ws_net_paid) AS total,
            SUM(CASE when wcs_sales_sk IS NULL THEN 0 ELSE 1 END * ws_net_paid) AS review_total
            FROM web_sales
            LEFT OUTER JOIN reviews ON ws_order_number = wcs_sales_sk
            WHERE ws_sold_date_sk between {q08_start_dt} AND {q08_end_dt}
        )
    """
    result = bc.sql(last_query)
    bc.drop_table("reviews")
    return result
if __name__ == "__main__":
    # Script entry point: parse args, attach to the cluster with a
    # BlazingSQL context, and run Q8.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q10/gpu_bdb_query_10_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from dask.distributed import wait
eol_char = "è"
def read_tables(data_dir, bc):
    """Register the product_reviews table from its parquet files."""
    reviews_glob = os.path.join(data_dir, "product_reviews/*.parquet")
    bc.create_table('product_reviews', reviews_glob)
def main(data_dir, client, bc, config):
    """Run TPCx-BB Q10 (BlazingSQL version).

    For every product, extract review sentences that contain a positive or
    negative sentiment word and report, per item, the sentence, its
    polarity (POS/NEG), and the word that triggered the classification.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query_1 = """
        SELECT pr_item_sk,
            pr_review_content,
            pr_review_sk
        FROM product_reviews
        where pr_review_content IS NOT NULL
        ORDER BY pr_item_sk, pr_review_content, pr_review_sk
    """
    product_reviews_df = bc.sql(query_1)
    product_reviews_df[
        "pr_review_content"
    ] = product_reviews_df.pr_review_content.str.lower()
    # Normalize sentence terminators to the single delimiter used by the
    # sentence tokenizer (cudf list-of-patterns str.replace).
    product_reviews_df[
        "pr_review_content"
    ] = product_reviews_df.pr_review_content.str.replace(
        [".", "?", "!"], [eol_char], regex=False
    )
    sentences = product_reviews_df.map_partitions(create_sentences_from_reviews)
    product_reviews_df = product_reviews_df[["pr_item_sk", "pr_review_sk"]]
    product_reviews_df["pr_review_sk"] = product_reviews_df["pr_review_sk"].astype("int32")
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    product_reviews_df = product_reviews_df.persist()
    wait(product_reviews_df)
    bc.create_table('product_reviews_df', product_reviews_df)
    sentences = sentences.persist()
    wait(sentences)
    bc.create_table('sentences', sentences)
    # These files come from the official TPCx-BB kit
    # We extracted them from bigbenchqueriesmr.jar
    # Need to pass the absolute path for these txt files
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    bc.create_table('negative_sentiment',
                    os.path.join(sentiment_dir, "negativeSentiment.txt"),
                    names="sentiment_word",
                    file_format="csv")
    bc.create_table('positive_sentiment',
                    os.path.join(sentiment_dir, "positiveSentiment.txt"),
                    names="sentiment_word",
                    file_format="csv")
    word_df = word_df.persist()
    wait(word_df)
    bc.create_table('word_df', word_df)
    # Tag each word with its polarity, recover the containing sentence, then
    # map the sentence back to its item via the review key.
    query = '''
        SELECT pr_item_sk as item_sk,
            sentence as review_sentence,
            sentiment,
            sentiment_word FROM
        (
            SELECT review_idx_global_pos,
                sentiment_word,
                sentiment,
                sentence FROM
            (
                WITH sent_df AS
                (
                    (SELECT sentiment_word, 'POS' as sentiment
                    FROM positive_sentiment
                    GROUP BY sentiment_word)
                    UNION ALL
                    (SELECT sentiment_word, 'NEG' as sentiment
                    FROM negative_sentiment
                    GROUP BY sentiment_word)
                )
                SELECT * FROM word_df
                INNER JOIN sent_df
                ON word_df.word = sent_df.sentiment_word
            ) word_sentence_sentiment
            LEFT JOIN sentences
            ON word_sentence_sentiment.sentence_idx_global_pos = sentences.sentence_tokenized_global_pos
        ) temp
        INNER JOIN product_reviews_df
        ON temp.review_idx_global_pos = product_reviews_df.pr_review_sk
        ORDER BY item_sk, review_sentence, sentiment, sentiment_word
    '''
    result = bc.sql(query)
    # Drop the temporary tables and release their backing dataframes.
    bc.drop_table("product_reviews_df")
    del product_reviews_df
    bc.drop_table("sentences")
    del sentences
    bc.drop_table("word_df")
    del word_df
    return result
if __name__ == "__main__":
    # Script entry point: parse args, attach to the cluster with a
    # BlazingSQL context, and run Q10.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q10/README.md | # Query 10
In this query, for all products, we extract the sentences from their product reviews that contain positive or negative sentiment, and display for each item the sentiment polarity of the extracted sentences (POS or NEG) together with the sentence and the word in the sentence leading to this classification.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q10/gpu_bdb_query_10_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import os
import dask.dataframe as dd
import dask_cudf
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences
)
from bdb_tools.q10_utils import (
eol_char,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run TPCx-BB Q10 (dask-sql version).

    For every product, extract review sentences that contain a positive or
    negative sentiment word and report, per item, the sentence, its
    polarity (POS/NEG), and the word that triggered the classification.

    Parameters
    ----------
    data_dir : str
        Benchmark data root (tables are registered by ``read_tables``).
    client : distributed.Client
        Dask client attached to the cluster.
    c : dask_sql.Context
        SQL context used to register tables and execute queries.
    config : dict
        Benchmark configuration; ``config["data_dir"]`` locates the
        sentiment word lists.
    """
    benchmark(read_tables, config, c)
    query_1 = """
        SELECT pr_item_sk,
            pr_review_content,
            pr_review_sk
        FROM product_reviews
        where pr_review_content IS NOT NULL
        ORDER BY pr_item_sk, pr_review_content, pr_review_sk
    """
    product_reviews_df = c.sql(query_1)
    product_reviews_df[
        "pr_review_content"
    ] = product_reviews_df.pr_review_content.str.lower()
    # Normalize sentence terminators to the single delimiter used by the
    # sentence tokenizer (one replace per char for pandas/cudf compatibility).
    for char in [".", "?", "!"]:
        product_reviews_df["pr_review_content"] = product_reviews_df.pr_review_content.str.replace(char, eol_char, regex=False)
    sentences = product_reviews_df.map_partitions(create_sentences_from_reviews)
    product_reviews_df = product_reviews_df[["pr_item_sk", "pr_review_sk"]]
    product_reviews_df["pr_review_sk"] = product_reviews_df["pr_review_sk"].astype("int32")
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    product_reviews_df = product_reviews_df.persist()
    wait(product_reviews_df)
    c.create_table('product_reviews_df', product_reviews_df, persist=False)
    sentences = sentences.persist()
    wait(sentences)
    c.create_table('sentences', sentences, persist=False)
    # These files come from the official TPCx-BB kit
    # We extracted them from bigbenchqueriesmr.jar
    # Need to pass the absolute path for these txt files
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")

    def _read_sentiment_table(filename):
        # Both lexicons are loaded identically; pick the reader matching the
        # backend (GPU vs CPU) of the reviews dataframe.
        path = os.path.join(sentiment_dir, filename)
        if isinstance(product_reviews_df, dask_cudf.DataFrame):
            return dask_cudf.read_csv(path, names=["sentiment_word"])
        return dd.read_csv(path, names=["sentiment_word"])

    c.create_table('negative_sentiment',
                   _read_sentiment_table("negativeSentiment.txt"),
                   persist=False)
    c.create_table('positive_sentiment',
                   _read_sentiment_table("positiveSentiment.txt"),
                   persist=False)
    word_df = word_df.persist()
    wait(word_df)
    c.create_table('word_df', word_df, persist=False)
    # Tag each word with its polarity, recover the containing sentence, then
    # map the sentence back to its item via the review key.
    query = '''
        SELECT pr_item_sk as item_sk,
            sentence as review_sentence,
            sentiment,
            sentiment_word FROM
        (
            SELECT review_idx_global_pos,
                sentiment_word,
                sentiment,
                sentence FROM
            (
                WITH sent_df AS
                (
                    (SELECT sentiment_word, 'POS' as sentiment
                    FROM positive_sentiment
                    GROUP BY sentiment_word)
                    UNION ALL
                    (SELECT sentiment_word, 'NEG' as sentiment
                    FROM negative_sentiment
                    GROUP BY sentiment_word)
                )
                SELECT * FROM word_df
                INNER JOIN sent_df
                ON word_df.word = sent_df.sentiment_word
            ) word_sentence_sentiment
            LEFT JOIN sentences
            ON word_sentence_sentiment.sentence_idx_global_pos = sentences.sentence_tokenized_global_pos
        ) temp
        INNER JOIN product_reviews_df
        ON temp.review_idx_global_pos = product_reviews_df.pr_review_sk
        ORDER BY item_sk, review_sentence, sentiment, sentiment_word
    '''
    result = c.sql(query)
    # Drop the temporary tables and release their backing dataframes.
    c.drop_table("product_reviews_df")
    del product_reviews_df
    c.drop_table("sentences")
    del sentences
    c.drop_table("word_df")
    del word_df
    return result
@annotate("QUERY10", color="green", domain="gpu-bdb")
def start_run():
    """Entry point: read CLI config, attach to the cluster, execute Q10."""
    cfg = gpubdb_argparser()
    dask_client, ctx = attach_to_cluster(cfg, create_sql_context=True)
    run_query(config=cfg, client=dask_client, query_func=main, sql_context=ctx)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q10/gpu_bdb_query_10.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.text import create_sentences_from_reviews, create_words_from_sentences
from bdb_tools.q10_utils import (
eol_char,
read_tables
)
from dask.distributed import wait
def load_sentiment_words(filename, sentiment):
    """Load a sentiment word list into a cudf DataFrame.

    Parameters
    ----------
    filename : str
        Path to a text file with one sentiment word per line.
    sentiment : str
        Polarity label attached to every word (callers pass "POS"/"NEG").

    Returns
    -------
    cudf.DataFrame
        Columns ``word`` (deduplicated) and ``sentiment``.
    """
    # Explicit encoding so the result does not depend on the platform locale.
    with open(filename, encoding="utf-8") as fh:
        # dedupe for one extra record in the source file
        sentiment_words = list({line.strip() for line in fh})
    sent_df = cudf.DataFrame({"word": sentiment_words})
    sent_df["sentiment"] = sentiment
    return sent_df
def main(client, config):
    """Run TPCx-BB Q10 (pure dask_cudf version).

    Joins review words against the positive/negative sentiment lexicons and
    returns, per item, each sentiment-bearing sentence with its polarity and
    the word that triggered the classification.
    """
    product_reviews_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # Drop reviews with no text.
    product_reviews_df = product_reviews_df[
        ~product_reviews_df.pr_review_content.isnull()
    ].reset_index(drop=True)
    product_reviews_df[
        "pr_review_content"
    ] = product_reviews_df.pr_review_content.str.lower()
    # Normalize sentence terminators to the single delimiter used by the
    # sentence tokenizer (cudf list-of-patterns str.replace).
    product_reviews_df[
        "pr_review_content"
    ] = product_reviews_df.pr_review_content.str.replace(
        [".", "?", "!"], [eol_char], regex=False
    )
    sentences = product_reviews_df.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # These files come from the official TPCx-BB kit
    # We extracted them from bigbenchqueriesmr.jar
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    neg_sent_df = load_sentiment_words(os.path.join(sentiment_dir, "negativeSentiment.txt"), "NEG")
    pos_sent_df = load_sentiment_words(os.path.join(sentiment_dir, "positiveSentiment.txt"), "POS")
    sent_df = cudf.concat([pos_sent_df, neg_sent_df])
    sent_df = dask_cudf.from_cudf(sent_df, npartitions=1)
    # Words that appear in either lexicon, tagged with their polarity.
    word_sentence_sentiment = word_df.merge(sent_df, how="inner", on="word")
    # Recover the full sentence for every matched word.
    temp = word_sentence_sentiment.merge(
        sentences,
        how="left",
        left_on="sentence_idx_global_pos",
        right_on="sentence_tokenized_global_pos",
    )
    temp = temp[["review_idx_global_pos", "word", "sentiment", "sentence"]]
    product_reviews_df = product_reviews_df[["pr_item_sk", "pr_review_sk"]]
    product_reviews_df["pr_review_sk"] = product_reviews_df["pr_review_sk"].astype(
        "int32"
    )
    # Map each sentence back to the item it reviews.
    final = temp.merge(
        product_reviews_df,
        how="inner",
        left_on="review_idx_global_pos",
        right_on="pr_review_sk",
    )
    final = final.rename(
        columns={
            "pr_item_sk": "item_sk",
            "sentence": "review_sentence",
            "word": "sentiment_word",
        }
    )
    keepcols = ["item_sk", "review_sentence", "sentiment", "sentiment_word"]
    final = final[keepcols].persist()
    # with sf100, there are 3.2M positive and negative review sentences (rows)
    final = final.sort_values(by=keepcols)
    final = final.persist()
    wait(final)
    return final
if __name__ == "__main__":
    # Import kept local to the entry point, matching the other pure-Python
    # query scripts in this repo.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q13/gpu_bdb_query_13_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q13_utils import read_tables
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run TPCx-BB Q13 (dask-sql version).

    Finds the top customers whose web-sales growth from 2001 to 2002
    outpaced their store-sales growth, ordered by web growth ratio.
    """
    benchmark(read_tables, config, c)
    # Per-customer store-sales totals for 2001/2002; the HAVING clause drops
    # customers with no 2001 store sales (ratio would divide by zero).
    query_1 = """
        SELECT
            ss.ss_customer_sk AS customer_sk,
            sum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) first_year_total,
            sum( case when (d_year = 2002) THEN ss_net_paid ELSE 0.0 END) second_year_total
        FROM store_sales ss
        JOIN
        (
            SELECT d_date_sk, d_year
            FROM date_dim d
            WHERE d.d_year in (2001, 2002)
        ) dd on ( ss.ss_sold_date_sk = dd.d_date_sk )
        GROUP BY ss.ss_customer_sk
        HAVING sum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) > 0.0
    """
    temp_table1 = c.sql(query_1)
    temp_table1 = temp_table1.persist()
    wait(temp_table1)
    c.create_table("temp_table1", temp_table1, persist=False)
    # Same per-customer aggregation for web sales.
    query_2 = """
        SELECT
            ws.ws_bill_customer_sk AS customer_sk,
            sum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) first_year_total,
            sum( case when (d_year = 2002) THEN ws_net_paid ELSE 0.0 END) second_year_total
        FROM web_sales ws
        JOIN
        (
            SELECT d_date_sk, d_year
            FROM date_dim d
            WHERE d.d_year in (2001, 2002)
        ) dd ON ( ws.ws_sold_date_sk = dd.d_date_sk )
        GROUP BY ws.ws_bill_customer_sk
        HAVING sum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) > 0.0
    """
    temp_table2 = c.sql(query_2)
    temp_table2 = temp_table2.persist()
    wait(temp_table2)
    c.create_table("temp_table2", temp_table2, persist=False)
    # Join both aggregates with customer and keep those whose web growth
    # ratio beats their store growth ratio.
    query = """
        SELECT
            CAST(c_customer_sk AS BIGINT) as c_customer_sk,
            c_first_name,
            c_last_name,
            (store.second_year_total / store.first_year_total) AS storeSalesIncreaseRatio,
            (web.second_year_total / web.first_year_total) AS webSalesIncreaseRatio
        FROM temp_table1 store,
            temp_table2 web,
            customer c
        WHERE store.customer_sk = web.customer_sk
        AND web.customer_sk = c_customer_sk
        AND (web.second_year_total / web.first_year_total) > (store.second_year_total / store.first_year_total)
        ORDER BY webSalesIncreaseRatio DESC,
            c_customer_sk,
            c_first_name,
            c_last_name
        LIMIT 100
    """
    result = c.sql(query)
    c.drop_table("temp_table1")
    c.drop_table("temp_table2")
    return result
@annotate("QUERY13", color="green", domain="gpu-bdb")
def start_run():
    """Entry point: read CLI config, attach to the cluster, execute Q13."""
    cfg = gpubdb_argparser()
    dask_client, ctx = attach_to_cluster(cfg, create_sql_context=True)
    run_query(config=cfg, client=dask_client, query_func=main, sql_context=ctx)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q13/gpu_bdb_query_13.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q13_utils import read_tables
from distributed import wait
# First year of Q13's two-year sales-growth comparison (year vs. year + 1).
q13_Year = 2001
# Maximum number of customers returned in the final result.
q13_limit = 100
# Util Function
def get_sales_ratio(df, base_year=None):
    """Split ``year_total`` into first-/second-year sales columns.

    Adds ``first_year_sales`` / ``second_year_sales`` columns holding
    ``year_total`` where ``d_year`` equals the base year / base year + 1,
    and 0.0 elsewhere.

    Parameters
    ----------
    df : DataFrame (pandas or cudf)
        Must contain ``d_year`` and ``year_total`` columns. Modified in
        place and also returned.
    base_year : int, optional
        First year of the comparison window; defaults to the module-level
        ``q13_Year`` (preserves the original behavior).
    """
    f_year = q13_Year if base_year is None else base_year
    s_year = f_year + 1

    first_year_flag = df["d_year"] == f_year
    second_year_flag = df["d_year"] == s_year

    # Use .loc assignment instead of chained indexing
    # (df[col][mask] = ...), which is copy-vs-view dependent and
    # deprecated in pandas.
    df["first_year_sales"] = 0.00
    df.loc[first_year_flag, "first_year_sales"] = df.loc[first_year_flag, "year_total"]
    df["second_year_sales"] = 0.00
    df.loc[second_year_flag, "second_year_sales"] = df.loc[second_year_flag, "year_total"]
    return df
def main(client, config):
    """Run TPCx-BB Q13 (pure dask/dataframe version).

    Computes, per customer, the ratio of second-year to first-year sales in
    both the store and web channels, keeps customers whose web ratio exceeds
    their store ratio, and returns the top ``q13_limit`` ordered by web
    ratio descending.
    """
    date_dim_df, customer_df, s_sales_df, web_sales_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # Query 0: time filtration
    filtered_date_df = date_dim_df.query(
        "d_year >= @q13_Year and d_year <= @q13_Year_plus",
        local_dict={"q13_Year": q13_Year, "q13_Year_plus": q13_Year + 1},
        meta=date_dim_df._meta,
    ).reset_index(drop=True)
    # Restrict both fact tables to the two-year window.
    s_sales_df = s_sales_df.merge(
        filtered_date_df, how="inner", left_on="ss_sold_date_sk", right_on="d_date_sk"
    )
    web_sales_df = web_sales_df.merge(
        filtered_date_df, how="inner", left_on="ws_sold_date_sk", right_on="d_date_sk"
    )
    # Query 1: Store Sales
    # SELECT
    # ss.ss_customer_sk AS customer_sk,
    # sum( case when (d_year = {q13_Year}) THEN ss_net_paid ELSE 0 END) first_year_total,
    # sum( case when (d_year = {q13_Year}+1) THEN ss_net_paid ELSE 0 END) second_year_total
    # FROM store_sales ss
    # JOIN (
    # SELECT d_date_sk, d_year
    # FROM date_dim d
    # WHERE d.d_year in ({q13_Year}, (q13_Year} + 1))) dd on ( ss.ss_sold_date_sk = dd.d_date_sk )
    # GROUP BY ss.ss_customer_sk
    # HAVING first_year_total > 0
    s_grouped_df = (
        s_sales_df.groupby(by=["ss_customer_sk", "d_year"])
        .agg({"ss_net_paid": "sum"})
        .reset_index()
        .rename(columns={"ss_net_paid": "year_total"})
    )
    sales_ratio_df = s_grouped_df.map_partitions(get_sales_ratio)
    # Collapse the two per-year rows into one row per customer; max() picks
    # the non-zero value from each year column.
    sales_ratio_df = (
        sales_ratio_df.groupby(by="ss_customer_sk")
        .agg({"first_year_sales": "max", "second_year_sales": "max"})
        .reset_index()
    )
    sales_ratio_df = sales_ratio_df.query("first_year_sales>0")
    sales_ratio_df["storeSalesIncreaseRatio"] = (
        sales_ratio_df["second_year_sales"] / sales_ratio_df["first_year_sales"]
    )
    sales_ratio_df = sales_ratio_df.drop(
        ["first_year_sales", "second_year_sales"], axis=1
    ).rename(columns={"ss_customer_sk": "c_customer_sk"})
    # Query 2: Web Sales
    # SELECT
    # ws.ws_bill_customer_sk AS customer_sk,
    # sum( case when (d_year = {q13_Year}) THEN ws_net_paid ELSE 0 END) first_year_total,
    # sum( case when (d_year = {q13_Year}+1) THEN ws_net_paid ELSE 0 END) second_year_total
    # FROM web_sales ws
    # JOIN (
    # SELECT d_date_sk, d_year
    # FROM date_dim d
    # WHERE d.d_year in ({q13_Year}, ({q13_Year} + 1) )
    # ) dd ON ( ws.ws_sold_date_sk = dd.d_date_sk )
    # GROUP BY ws.ws_bill_customer_sk
    # HAVING first_year_total > 0
    web_grouped_df = (
        web_sales_df.groupby(by=["ws_bill_customer_sk", "d_year"])
        .agg({"ws_net_paid": "sum"})
        .reset_index()
        .rename(columns={"ws_net_paid": "year_total"})
    )
    web_ratio_df = web_grouped_df.map_partitions(get_sales_ratio)
    web_ratio_df = (
        web_ratio_df.groupby(by="ws_bill_customer_sk")
        .agg({"first_year_sales": "max", "second_year_sales": "max"})
        .reset_index()
    )
    web_ratio_df = web_ratio_df.query("first_year_sales>0")
    web_ratio_df["webSalesIncreaseRatio"] = (
        web_ratio_df["second_year_sales"] / web_ratio_df["first_year_sales"]
    )
    web_ratio_df = web_ratio_df.drop(
        ["first_year_sales", "second_year_sales"], axis=1
    ).rename(columns={"ws_bill_customer_sk": "c_customer_sk"})
    # Results Query
    # SELECT
    # c_customer_sk,
    # c_first_name,
    # c_last_name,
    # (store.second_year_total / store.first_year_total) AS storeSalesIncreaseRatio ,
    # (web.second_year_total / web.first_year_total) AS webSalesIncreaseRatio
    # FROM store ,
    # web ,
    # customer c
    # WHERE store.customer_sk = web.customer_sk
    # AND web.customer_sk = c_customer_sk
    # AND (web.second_year_total / web.first_year_total) > (store.second_year_total / store.first_year_total)
    # ORDER BY
    # webSalesIncreaseRatio DESC,
    # c_customer_sk,
    # c_first_name,
    # c_last_name
    # LIMIT {q13_limit}
    both_sales = sales_ratio_df.merge(web_ratio_df, how="inner", on="c_customer_sk")
    # need to enforce both being int64 even though both_sales.c_customer_sk is already
    # int64. Figure this out later
    customer_df["c_customer_sk"] = customer_df["c_customer_sk"].astype("int64")
    both_sales["c_customer_sk"] = both_sales["c_customer_sk"].astype("int64")
    final_df = customer_df.merge(both_sales, how="inner", on="c_customer_sk").query(
        "webSalesIncreaseRatio > storeSalesIncreaseRatio"
    )
    final_df = final_df.drop("c_customer_id", axis=1)
    # Collapse to a single partition so the global sort below is valid.
    result_df = final_df.repartition(npartitions=1).persist()
    wait(result_df)
    result_df = result_df.map_partitions(
        lambda df: df.sort_values(
            ["webSalesIncreaseRatio", "c_customer_sk", "c_first_name", "c_last_name"],
            ascending=[False, True, True, True],
        )
    )
    result_df = result_df.reset_index(drop=True)
    result_df = result_df.head(q13_limit)
    return result_df
if __name__ == "__main__":
    # Import kept local to the entry point, matching the other pure-Python
    # query scripts in this repo.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q13/gpu_bdb_query_13_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
def read_tables(data_dir, bc):
    """Register the four Q13 input tables from their parquet directories."""
    for table in ("date_dim", "customer", "store_sales", "web_sales"):
        bc.create_table(table, os.path.join(data_dir, f"{table}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 13 through BlazingSQL.

    Computes, per customer, total store and web spend for 2001 and 2002,
    then reports the 100 customers whose web-sales year-over-year growth
    ratio exceeded their store-sales growth ratio.

    Parameters
    ----------
    data_dir : str
        Root directory containing the Parquet tables.
    client : distributed.Client
        Dask client (used by the harness; not referenced directly here).
    bc : BlazingContext
        SQL context in which tables are registered.
    config : dict
        Benchmark configuration (dask profiling flag, etc.).

    Returns
    -------
    dask_cudf.DataFrame
        The top-100 result described above.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # Per-customer store-sales totals for 2001/2002. The HAVING clause drops
    # customers with no 2001 baseline so the growth ratio is well defined.
    query_1 = """
        SELECT
            ss.ss_customer_sk AS customer_sk,
            sum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) first_year_total,
            sum( case when (d_year = 2002) THEN ss_net_paid ELSE 0.0 END) second_year_total
        FROM store_sales ss
        JOIN
        (
            SELECT d_date_sk, d_year
            FROM date_dim d
            WHERE d.d_year in (2001, 2002)
        ) dd on ( ss.ss_sold_date_sk = dd.d_date_sk )
        GROUP BY ss.ss_customer_sk
        HAVING sum( case when (d_year = 2001) THEN ss_net_paid ELSE 0.0 END) > 0.0
    """
    temp_table1 = bc.sql(query_1)
    # Materialize before re-registering the result as a table for the next step.
    temp_table1 = temp_table1.persist()
    wait(temp_table1)
    bc.create_table("temp_table1", temp_table1)

    # Same aggregation for web sales, keyed by the billing customer.
    query_2 = """
        SELECT
            ws.ws_bill_customer_sk AS customer_sk,
            sum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) first_year_total,
            sum( case when (d_year = 2002) THEN ws_net_paid ELSE 0.0 END) second_year_total
        FROM web_sales ws
        JOIN
        (
            SELECT d_date_sk, d_year
            FROM date_dim d
            WHERE d.d_year in (2001, 2002)
        ) dd ON ( ws.ws_sold_date_sk = dd.d_date_sk )
        GROUP BY ws.ws_bill_customer_sk
        HAVING sum( case when (d_year = 2001) THEN ws_net_paid ELSE 0.0 END) > 0.0
    """
    temp_table2 = bc.sql(query_2)
    temp_table2 = temp_table2.persist()
    wait(temp_table2)
    bc.create_table("temp_table2", temp_table2)

    # Join both aggregates with customer names and keep customers whose
    # web growth ratio beats their store growth ratio.
    query = """
        SELECT
            CAST(c_customer_sk AS BIGINT) as c_customer_sk,
            c_first_name,
            c_last_name,
            (store.second_year_total / store.first_year_total) AS storeSalesIncreaseRatio,
            (web.second_year_total / web.first_year_total) AS webSalesIncreaseRatio
        FROM temp_table1 store,
            temp_table2 web,
            customer c
        WHERE store.customer_sk = web.customer_sk
        AND web.customer_sk = c_customer_sk
        AND (web.second_year_total / web.first_year_total) > (store.second_year_total / store.first_year_total)
        ORDER BY webSalesIncreaseRatio DESC,
            c_customer_sk,
            c_first_name,
            c_last_name
        LIMIT 100
    """
    result = bc.sql(query)

    # Drop the intermediate registrations so repeated runs start clean.
    bc.drop_table("temp_table1")
    bc.drop_table("temp_table2")
    return result
if __name__ == "__main__":
    # Entry point: this SQL variant needs a BlazingContext, so ask the
    # cluster-startup helper to create one alongside the Dask client.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q30/gpu_bdb_query_30_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.sessionization import (
get_distinct_sessions,
get_pairs
)
from dask.distributed import wait
# -------- Q30 -----------
# session timeout in secs
q30_session_timeout_inSec = 3600
# query output limit
q30_limit = 40
def read_tables(data_dir, bc):
    """Register the two Parquet-backed tables that query 30 reads."""
    for table in ("web_clickstreams", "item"):
        bc.create_table(table, os.path.join(data_dir, f"{table}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 30 through BlazingSQL.

    Finds pairs of item categories viewed together within the same web-click
    session (sessions split after q30_session_timeout_inSec of inactivity)
    and returns the q30_limit most frequent pairs.

    Returns
    -------
    dask_cudf.DataFrame
        (category_id_1, category_id_2, cnt) rows, most frequent first.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # Narrow item to the join key plus a compact (TINYINT) category id.
    query_1 = """
        SELECT i_item_sk,
            CAST(i_category_id AS TINYINT) AS i_category_id
        FROM item
    """
    item_df = bc.sql(query_1)
    # Materialize before re-registering the result as a table.
    item_df = item_df.persist()
    wait(item_df)
    bc.create_table("item_df", item_df)

    # Join clicks with items. The ORDER BY groups each user's clicks together
    # and sorts them by time, which the sessionization step relies on.
    query_2 = """
        SELECT wcs_user_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec,
            i_category_id
        FROM web_clickstreams wcs, item_df i
        WHERE wcs.wcs_item_sk = i.i_item_sk
        AND i.i_category_id IS NOT NULL
        AND wcs.wcs_user_sk IS NOT NULL
        ORDER BY wcs.wcs_user_sk, tstamp_inSec, i_category_id
    """
    merged_df = bc.sql(query_2)
    bc.drop_table("item_df")
    del item_df

    # Partition-local sessionization: consecutive clicks by the same user
    # within the timeout form one session.
    distinct_session_df = merged_df.map_partitions(get_distinct_sessions,
                            keep_cols=["wcs_user_sk", "i_category_id"],
                            time_out=q30_session_timeout_inSec)
    del merged_df

    # Expand each session into unordered category pairs.
    pair_df = distinct_session_df.map_partitions(
        get_pairs,
        pair_col="i_category_id",
        output_col_1="category_id_1",
        output_col_2="category_id_2")
    del distinct_session_df

    pair_df = pair_df.persist()
    wait(pair_df)
    bc.create_table('pair_df', pair_df)

    # Count pair frequencies and keep the top q30_limit.
    last_query = f"""
        SELECT CAST(category_id_1 AS BIGINT) AS category_id_1,
            CAST(category_id_2 AS BIGINT) AS category_id_2,
            COUNT(category_id_2) AS cnt
        FROM pair_df
        GROUP BY category_id_1, category_id_2
        ORDER BY cnt desc
        LIMIT {q30_limit}
    """
    result = bc.sql(last_query)
    bc.drop_table("pair_df")
    return result
if __name__ == "__main__":
    # Entry point: this SQL variant needs a BlazingContext, so ask the
    # cluster-startup helper to create one alongside the Dask client.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q30/gpu_bdb_query_30_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.sessionization import (
get_distinct_sessions,
get_pairs
)
from bdb_tools.q30_utils import (
q30_session_timeout_inSec,
q30_limit,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run gpu-bdb query 30 through dask-sql.

    Same logic as the BlazingSQL variant: find the q30_limit most frequent
    pairs of item categories viewed together within one click session.
    `c` is the dask-sql Context.
    """
    benchmark(read_tables, config, c)

    # Narrow item to the join key plus a compact (TINYINT) category id.
    query_1 = """
        SELECT i_item_sk,
            CAST(i_category_id AS TINYINT) AS i_category_id
        FROM item
    """
    item_df = c.sql(query_1)
    # Materialize before re-registering; persist=False because the frame is
    # already persisted/waited on here.
    item_df = item_df.persist()
    wait(item_df)
    c.create_table("item_df", item_df, persist=False)

    # Join clicks with items. DISTRIBUTE BY shuffles so that all clicks of a
    # user land in one partition, the precondition for partition-local
    # sessionization below.
    query_2 = """
        SELECT wcs_user_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec,
            i_category_id
        FROM web_clickstreams wcs, item_df i
        WHERE wcs.wcs_item_sk = i.i_item_sk
        AND i.i_category_id IS NOT NULL
        AND wcs.wcs_user_sk IS NOT NULL
        DISTRIBUTE BY wcs_user_sk
    """
    merged_df = c.sql(query_2)
    c.drop_table("item_df")
    del item_df

    # Partition-local sessionization: consecutive clicks by the same user
    # within the timeout form one session.
    distinct_session_df = merged_df.map_partitions(get_distinct_sessions,
                            keep_cols=["wcs_user_sk", "i_category_id"],
                            time_out=q30_session_timeout_inSec)
    del merged_df

    # Expand each session into unordered category pairs.
    pair_df = distinct_session_df.map_partitions(
        get_pairs,
        pair_col="i_category_id",
        output_col_1="category_id_1",
        output_col_2="category_id_2")
    del distinct_session_df

    c.create_table('pair_df', pair_df, persist=False)

    # Count pair frequencies and keep the top q30_limit.
    last_query = f"""
        SELECT CAST(category_id_1 AS BIGINT) AS category_id_1,
            CAST(category_id_2 AS BIGINT) AS category_id_2,
            COUNT(category_id_2) AS cnt
        FROM pair_df
        GROUP BY category_id_1, category_id_2
        ORDER BY cnt desc
        LIMIT {q30_limit}
    """
    result = c.sql(last_query)
    c.drop_table("pair_df")
    return result
@annotate("QUERY30", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    # Script entry point.
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q30/gpu_bdb_query_30.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q30_utils import (
q30_session_timeout_inSec,
q30_limit,
read_tables
)
from bdb_tools.sessionization import get_distinct_sessions, get_pairs
from dask import delayed
import numpy as np
### Implementation Notes:
### Future Notes:
# The bottleneck of current implementation is `set-index`, once ucx is working correctly
# it should go away
def pre_repartition_task(wcs_fn, f_item_df):
    """Read one web_clickstreams Parquet file and prepare it for shuffling.

    Keeps only clicks with a known user, joins against the pre-filtered item
    frame, and derives a per-click timestamp in seconds.

    Parameters
    ----------
    wcs_fn : str
        Path of a single web_clickstreams Parquet file.
    f_item_df : cudf.DataFrame
        Item rows (already filtered to non-null categories) with at least
        `i_item_sk` and `i_category_id` columns.

    Returns
    -------
    cudf.DataFrame
        Columns: wcs_user_sk, tstamp_inSec, i_category_id (int8).
    """
    clicks = cudf.read_parquet(
        wcs_fn,
        columns=["wcs_user_sk", "wcs_item_sk", "wcs_click_date_sk", "wcs_click_time_sk"],
    )
    clicks = clicks[clicks["wcs_user_sk"].notnull()].reset_index(drop=True)

    joined = clicks.merge(
        f_item_df, left_on=["wcs_item_sk"], right_on=["i_item_sk"]
    )
    del clicks

    # day index -> seconds, plus the intra-day click time (24*60*60 == 86400)
    joined["tstamp_inSec"] = (
        joined["wcs_click_date_sk"] * 24 * 60 * 60 + joined["wcs_click_time_sk"]
    )
    joined = joined[["wcs_user_sk", "tstamp_inSec", "i_category_id"]]
    # int8 keeps the shuffle payload small; category ids fit comfortably.
    joined["i_category_id"] = joined["i_category_id"].astype(np.int8)
    return joined
def main(client, config):
    """Run gpu-bdb query 30 with the pure dask_cudf implementation.

    Builds per-session pairs of item categories viewed together and returns
    the q30_limit most frequent pairs as a cudf DataFrame.
    """
    item_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    """
    Filter and Join web_clickstreams and item table.
    SELECT wcs_user_sk,
      (wcs_click_date_sk*24L*60L*60L + wcs_click_time_sk) AS tstamp_inSec,
      i_category_id
    FROM web_clickstreams wcs, item i
    WHERE wcs.wcs_item_sk = i.i_item_sk
    AND i.i_category_id IS NOT NULL
    AND wcs.wcs_user_sk IS NOT NULL
    """
    f_item_df = item_df[item_df["i_category_id"].notnull()].reset_index(drop=True)

    # Reading and filtering are deliberately kept as separate tasks: fusing
    # them would read (and possibly spill) each whole file before any rows
    # could be filtered out, increasing memory pressure.
    web_clickstream_flist = glob.glob(os.path.join(config["data_dir"], "web_clickstreams/*.parquet"))
    task_ls = [
        delayed(pre_repartition_task)(fn, f_item_df.to_delayed()[0])
        for fn in web_clickstream_flist
    ]

    # Meta frame describing the dtypes produced by pre_repartition_task.
    meta_d = {
        "wcs_user_sk": np.ones(1, dtype=np.int64),
        "tstamp_inSec": np.ones(1, dtype=np.int64),
        "i_category_id": np.ones(1, dtype=np.int8),
    }
    meta_df = cudf.DataFrame(meta_d)
    merged_df = dask_cudf.from_delayed(task_ls, meta=meta_df)

    # Shuffle so that all clicks for a given user end up in one partition —
    # the precondition for partition-local sessionization.
    merged_df = merged_df.shuffle(on=["wcs_user_sk"])

    ### Main Query
    ### sessionize logic.
    distinct_session_df = merged_df.map_partitions(
        get_distinct_sessions,
        keep_cols=["wcs_user_sk", "i_category_id"],
        time_out=q30_session_timeout_inSec,
    )
    del merged_df

    ### create pairs out of item category id's.
    pair_df = distinct_session_df.map_partitions(
        get_pairs,
        pair_col="i_category_id",
        output_col_1="category_id_1",
        output_col_2="category_id_2",
    )
    del distinct_session_df

    ### apply groupby on "category_id_1", "category_id_2"
    grouped_df = (
        pair_df.groupby(["category_id_1", "category_id_2"])
        .size(split_every=2)
        .reset_index()
    )
    grouped_df.columns = ["category_id_1", "category_id_2", "cnt"]
    result_df = grouped_df.repartition(npartitions=1).persist()

    ### sort records in desc order and reset index.
    ### below only has 40 rows so leaving as cudf frame should be fine
    result_df = result_df.map_partitions(
        lambda x: x.sort_values("cnt", ascending=False)
    )
    result_df = result_df.reset_index(drop=True).head(q30_limit)
    return result_df
if __name__ == "__main__":
    # Script entry point: attach to the cluster and run through the harness.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q05/gpu_bdb_query_05_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from dask.distributed import wait
from dask import delayed
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q05_utils import (
build_and_predict_model,
read_tables
)
def main(data_dir, client, c, config):
    """Run gpu-bdb query 5 (dask-sql variant).

    SQL does the ETL: per-user click counts by category joined with customer
    demographics collapsed to binary features. The resulting frame is then
    used to fit and evaluate a cuml logistic regression (see
    build_and_predict_model) predicting clicks in the 'Books' category.

    Returns
    -------
    dict
        Model metrics from build_and_predict_model.
    """
    benchmark(read_tables, config, c)

    query = """
        SELECT
            --wcs_user_sk,
            clicks_in_category,
            CASE WHEN cd_education_status IN ('Advanced Degree', 'College', '4 yr Degree', '2 yr Degree')
                THEN 1 ELSE 0 END AS college_education,
            CASE WHEN cd_gender = 'M' THEN 1 ELSE 0 END AS male,
            clicks_in_1,
            clicks_in_2,
            clicks_in_3,
            clicks_in_4,
            clicks_in_5,
            clicks_in_6,
            clicks_in_7
        FROM
        (
            SELECT
                wcs_user_sk,
                SUM( CASE WHEN i_category = 'Books' THEN 1 ELSE 0 END) AS clicks_in_category,
                SUM( CASE WHEN i_category_id = 1 THEN 1 ELSE 0 END) AS clicks_in_1,
                SUM( CASE WHEN i_category_id = 2 THEN 1 ELSE 0 END) AS clicks_in_2,
                SUM( CASE WHEN i_category_id = 3 THEN 1 ELSE 0 END) AS clicks_in_3,
                SUM( CASE WHEN i_category_id = 4 THEN 1 ELSE 0 END) AS clicks_in_4,
                SUM( CASE WHEN i_category_id = 5 THEN 1 ELSE 0 END) AS clicks_in_5,
                SUM( CASE WHEN i_category_id = 6 THEN 1 ELSE 0 END) AS clicks_in_6,
                SUM( CASE WHEN i_category_id = 7 THEN 1 ELSE 0 END) AS clicks_in_7
            FROM web_clickstreams
            INNER JOIN item it ON
            (
                wcs_item_sk = i_item_sk
                AND wcs_user_sk IS NOT NULL
            )
            GROUP BY wcs_user_sk
        ) q05_user_clicks_in_cat
        INNER JOIN customer ct ON wcs_user_sk = c_customer_sk
        INNER JOIN customer_demographics ON c_current_cdemo_sk = cd_demo_sk
    """
    cust_and_clicks_ddf = c.sql(query)

    # The model is trained on a single partition (single GPU).
    cust_and_clicks_ddf = cust_and_clicks_ddf.repartition(npartitions=1)

    # Convert clicks_in_category to a binary label
    cust_and_clicks_ddf["clicks_in_category"] = (
        cust_and_clicks_ddf["clicks_in_category"]
        > cust_and_clicks_ddf["clicks_in_category"].mean()
    ).astype("int64")

    # Converting the dataframe to float64 as cuml logistic reg requires this
    ml_input_df = cust_and_clicks_ddf.astype("float64")
    ml_input_df = ml_input_df.persist()
    wait(ml_input_df)

    # One delayed task per partition (there is exactly one after repartition).
    ml_tasks = [delayed(build_and_predict_model)(df) for df in ml_input_df.to_delayed()]
    results_dict = client.compute(*ml_tasks, sync=True)
    return results_dict
@annotate("QUERY5", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
    # Script entry point.
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q05/gpu_bdb_query_05.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import glob
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q05_utils import (
build_and_predict_model,
wcs_columns,
read_tables
)
import numpy as np
from dask import delayed
import pandas as pd
#
# Query Configuration
#
# Education levels counted as "college educated" for the binary feature.
COLLEGE_ED_STRS = ["Advanced Degree", "College", "4 yr Degree", "2 yr Degree"]
# Target category: the label is "did the user click in this category".
Q05_I_CATEGORY = "Books"
def get_groupby_results(file_list, item_df):
    """Aggregate per-user click counts over a batch of clickstream files.

    Processes one Parquet file at a time and folds each file's per-user
    sums into a running aggregate, so only one file's raw rows are resident
    at once ("functional approach for better scaling").

    Parameters
    ----------
    file_list : list[str]
        Paths of web_clickstreams Parquet files assigned to this task.
    item_df : cudf.DataFrame
        Item frame with i_item_sk, i_category_id and clicks_in_category.

    Returns
    -------
    cudf.DataFrame
        One row per wcs_user_sk with summed clicks_in_category and
        clicks_in_1 .. clicks_in_7 columns.
    """
    sum_by_cat_ddf = None
    for fn in file_list:
        wcs_ddf = cudf.read_parquet(fn, columns=wcs_columns)
        wcs_ddf = wcs_ddf[wcs_ddf.wcs_user_sk.notnull()].reset_index(drop=True)
        wcs_ddf = wcs_ddf.merge(
            item_df, left_on="wcs_item_sk", right_on="i_item_sk", how="inner"
        )
        keep_cols = ["wcs_user_sk", "i_category_id", "clicks_in_category"]
        wcs_ddf = wcs_ddf[keep_cols]
        # One-hot encode category id into clicks_in_1..7; `cats` pins the
        # category set so every file produces the same columns even if some
        # category is absent from that file.
        wcs_ddf = cudf.get_dummies(
            wcs_ddf,
            columns=["i_category_id"],
            prefix="clicks_in",
            prefix_sep="_",
            cats={"i_category_id":np.arange(1, 8, dtype="int32")},
            dtype=np.int8,
        )
        keep_cols = ["wcs_user_sk", "clicks_in_category"] + [
            f"clicks_in_{i}" for i in range(1, 8)
        ]
        wcs_ddf = wcs_ddf[keep_cols]
        ### todo: can be shifted downstream to make only 1 groupby call
        grouped_df = wcs_ddf.groupby(["wcs_user_sk"], sort=False, as_index=False).sum()
        if sum_by_cat_ddf is None:
            sum_by_cat_ddf = grouped_df
        else:
            # Roll up to the number of clicks per user
            sum_by_cat_ddf = (
                cudf.concat([sum_by_cat_ddf, grouped_df])
                .groupby("wcs_user_sk", sort=False, as_index=False)
                .sum()
            )
        del grouped_df
        del wcs_ddf
    return sum_by_cat_ddf
def main(client, config):
    """Run gpu-bdb query 5 with the pure dask_cudf implementation.

    Engineers per-user click features from clickstreams, joins them with
    customer demographics, and fits a cuml logistic regression predicting
    clicks in the target category (Q05_I_CATEGORY).

    Returns
    -------
    dict
        Model metrics from build_and_predict_model.
    """
    item_ddf, customer_ddf, customer_dem_ddf = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # We want to find clicks in the parameterized category.
    # It would be more efficient to translate to a category id, but all of
    # the SQL samples refer to string categories directly. We call this
    # clicks_in_category to match the names used in the SQL examples,
    # though clicks_in_target would be a clearer name.
    item_ddf["clicks_in_category"] = (
        (item_ddf["i_category"] == Q05_I_CATEGORY)
        .astype(np.int8)
    )
    keep_cols = ["i_item_sk", "i_category_id", "clicks_in_category"]
    item_ddf = item_ddf[keep_cols]

    # Split the clickstream files into roughly one batch per worker; each
    # batch is aggregated independently by get_groupby_results.
    web_clickstream_flist = glob.glob(os.path.join(config["data_dir"], "web_clickstreams/*.parquet"))
    n_workers = len(client.scheduler_info()["workers"])
    batchsize = len(web_clickstream_flist) // n_workers
    if batchsize < 1:
        batchsize = 1
    chunks = [
        web_clickstream_flist[x : x + batchsize]
        for x in range(0, len(web_clickstream_flist), batchsize)
    ]
    task_ls = [
        delayed(get_groupby_results)(c, item_ddf.to_delayed()[0]) for c in chunks
    ]

    # Meta frame describing the int64 columns produced per batch.
    meta_d = {
        "wcs_user_sk": {},
        "clicks_in_category": {},
        "clicks_in_1": {},
        "clicks_in_2": {},
        "clicks_in_3": {},
        "clicks_in_4": {},
        "clicks_in_5": {},
        "clicks_in_6": {},
        "clicks_in_7": {},
    }
    df = cudf.from_pandas(pd.DataFrame.from_dict(meta_d, dtype="int64"))
    sum_by_cat_ddf = dask_cudf.from_delayed(task_ls, meta=df)
    # Final roll-up across batches to one row per user.
    sum_by_cat_ddf = sum_by_cat_ddf.groupby(["wcs_user_sk"], sort=True).sum()
    sum_by_cat_ddf = sum_by_cat_ddf.reset_index(drop=False)

    #
    # Combine user-level click summaries with customer demographics
    #
    customer_merged_ddf = customer_ddf.merge(
        customer_dem_ddf, left_on="c_current_cdemo_sk", right_on="cd_demo_sk"
    )
    customer_merged_ddf = customer_merged_ddf[
        ["c_customer_sk", "cd_gender", "cd_education_status"]
    ]

    # Collapse demographics to the two binary model features.
    customer_merged_ddf["college_education"] = (
        customer_merged_ddf.cd_education_status.isin(COLLEGE_ED_STRS)
        .astype(np.int64)
        .fillna(0)
    ).reset_index(drop=True)
    customer_merged_ddf["male"] = (
        (customer_merged_ddf["cd_gender"] == "M").astype(np.int64).fillna(0)
    ).reset_index(drop=True)

    cust_and_clicks_ddf = customer_merged_ddf[
        ["c_customer_sk", "college_education", "male"]
    ].merge(sum_by_cat_ddf, left_on="c_customer_sk", right_on="wcs_user_sk")

    keep_cols = ["clicks_in_category", "college_education", "male"] + [
        f"clicks_in_{i}" for i in range(1, 8)
    ]
    cust_and_clicks_ddf = cust_and_clicks_ddf[keep_cols]
    # The ETL step in spark covers everything above this point

    # Convert clicks_in_category to a binary label
    cust_and_clicks_ddf["clicks_in_category"] = (
        (
            cust_and_clicks_ddf["clicks_in_category"]
            > cust_and_clicks_ddf["clicks_in_category"].mean()
        )
        .reset_index(drop=True)
        .astype(np.int64)
    )

    # Converting the dataframe to float64 as cuml logistic reg requires this
    ml_input_df = cust_and_clicks_ddf.astype("float64")
    ml_input_df = ml_input_df.persist()

    ml_tasks = [delayed(build_and_predict_model)(df) for df in ml_input_df.to_delayed()]
    results_dict = client.compute(*ml_tasks, sync=True)
    return results_dict
if __name__ == "__main__":
    # Script entry point: attach to the cluster and run through the harness.
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q05/gpu_bdb_query_05_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client, wait
from dask import delayed
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.cupy_metrics import cupy_precision_score
from sklearn.metrics import roc_auc_score
import cupy as cp
# Logistic Regression params
# The reference implementation's LBFGS solver is emulated here via cuml's
# "qn" solver (selected in build_and_predict_model).
# step_size = 1       Not used
# numCorrections = 10 Not used
iterations = 100  # max_iter for the cuml solver
C = 10_000  # reg_lambda = 0 hence C for model is a large value
convergence_tol = 1e-9
def read_tables(data_dir, bc):
    """Register the four Parquet-backed tables that query 5 reads."""
    for table in (
        "web_clickstreams",
        "customer",
        "item",
        "customer_demographics",
    ):
        bc.create_table(table, os.path.join(data_dir, f"{table}/*.parquet"))
def build_and_predict_model(ml_input_df):
    """
    Create a standardized feature matrix X and target array y.
    Returns the model and accuracy statistics

    Parameters
    ----------
    ml_input_df : cudf.DataFrame
        Float64 frame containing the binary label 'clicks_in_category'
        plus the college_education/male/clicks_in_1..7 feature columns.

    Returns
    -------
    dict
        Keys: 'auc', 'precision', 'confusion_matrix', 'output_type'.
    """
    # cuml is imported lazily so the module can be imported without a GPU.
    import cuml
    from cuml.metrics import confusion_matrix
    feature_names = ["college_education", "male"] + [
        "clicks_in_%d" % i for i in range(1, 8)
    ]
    X = ml_input_df[feature_names]
    # Standardize input matrix
    X = (X - X.mean()) / X.std()
    y = ml_input_df["clicks_in_category"]
    # penalty="none" disables regularization, so C has no effect in practice;
    # "qn" stands in for the reference implementation's LBFGS solver.
    model = cuml.LogisticRegression(
        tol=convergence_tol,
        penalty="none",
        solver="qn",
        fit_intercept=True,
        max_iter=iterations,
        C=C,
    )
    model.fit(X, y)
    #
    # Predict and evaluate accuracy
    # (Should be 1.0) at SF-1
    #
    results_dict = {}
    y_pred = model.predict(X)
    # NOTE(review): Series.to_array() is an older cudf API (newer releases
    # use .to_numpy()) — confirm against the pinned cudf version.
    results_dict["auc"] = roc_auc_score(y.to_array(), y_pred.to_array())
    results_dict["precision"] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred))
    results_dict["confusion_matrix"] = confusion_matrix(
        cp.asarray(y, dtype="int32"), cp.asarray(y_pred, dtype="int32")
    )
    results_dict["output_type"] = "supervised"
    return results_dict
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 5 through BlazingSQL.

    SQL performs the ETL (per-user click counts per category joined with
    binarized customer demographics); the resulting single-partition frame
    feeds build_and_predict_model.

    Returns
    -------
    dict
        Model metrics from build_and_predict_model.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    query = """
        SELECT
            --wcs_user_sk,
            clicks_in_category,
            CASE WHEN cd_education_status IN ('Advanced Degree', 'College', '4 yr Degree', '2 yr Degree')
                THEN 1 ELSE 0 END AS college_education,
            CASE WHEN cd_gender = 'M' THEN 1 ELSE 0 END AS male,
            clicks_in_1,
            clicks_in_2,
            clicks_in_3,
            clicks_in_4,
            clicks_in_5,
            clicks_in_6,
            clicks_in_7
        FROM
        (
            SELECT
                wcs_user_sk,
                SUM( CASE WHEN i_category = 'Books' THEN 1 ELSE 0 END) AS clicks_in_category,
                SUM( CASE WHEN i_category_id = 1 THEN 1 ELSE 0 END) AS clicks_in_1,
                SUM( CASE WHEN i_category_id = 2 THEN 1 ELSE 0 END) AS clicks_in_2,
                SUM( CASE WHEN i_category_id = 3 THEN 1 ELSE 0 END) AS clicks_in_3,
                SUM( CASE WHEN i_category_id = 4 THEN 1 ELSE 0 END) AS clicks_in_4,
                SUM( CASE WHEN i_category_id = 5 THEN 1 ELSE 0 END) AS clicks_in_5,
                SUM( CASE WHEN i_category_id = 6 THEN 1 ELSE 0 END) AS clicks_in_6,
                SUM( CASE WHEN i_category_id = 7 THEN 1 ELSE 0 END) AS clicks_in_7
            FROM web_clickstreams
            INNER JOIN item it ON
            (
                wcs_item_sk = i_item_sk
                AND wcs_user_sk IS NOT NULL
            )
            GROUP BY wcs_user_sk
        ) q05_user_clicks_in_cat
        INNER JOIN customer ct ON wcs_user_sk = c_customer_sk
        INNER JOIN customer_demographics ON c_current_cdemo_sk = cd_demo_sk
    """
    cust_and_clicks_ddf = bc.sql(query)

    # The model is trained on a single partition (single GPU).
    cust_and_clicks_ddf = cust_and_clicks_ddf.repartition(npartitions=1)

    # Convert clicks_in_category to a binary label
    cust_and_clicks_ddf["clicks_in_category"] = (
        cust_and_clicks_ddf["clicks_in_category"]
        > cust_and_clicks_ddf["clicks_in_category"].mean()
    ).astype("int64")

    # Converting the dataframe to float64 as cuml logistic reg requires this
    ml_input_df = cust_and_clicks_ddf.astype("float64")
    ml_input_df = ml_input_df.persist()
    wait(ml_input_df)

    ml_tasks = [delayed(build_and_predict_model)(df) for df in ml_input_df.to_delayed()]
    results_dict = client.compute(*ml_tasks, sync=True)
    return results_dict
if __name__ == "__main__":
    # Entry point: this SQL variant needs a BlazingContext, so ask the
    # cluster-startup helper to create one alongside the Dask client.
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/load_test/gpu_bdb_load_test.py | from bdb_tools.utils import benchmark, gpubdb_argparser, run_query
from bdb_tools.readers import build_reader
import os, subprocess, math, time
# Parsed at import time so the module-level constants below can be derived
# from the configured data directory.
config = gpubdb_argparser()

# Directory of per-table Spark schema files; the table list is derived from it.
spark_schema_dir = f"{os.getcwd()}/spark_table_schemas/"

# these tables have extra data produced by bigbench dataGen
# (a "data_refresh" directory whose files are appended to the base data)
refresh_tables = [
    "customer",
    "customer_address",
    "inventory",
    "item",
    "item_marketprices",
    "product_reviews",
    "store_returns",
    "store_sales",
    "web_clickstreams",
    "web_returns",
    "web_sales",
]

# One table per schema file (strip the ".schema" extension).
tables = [table.split(".")[0] for table in os.listdir(spark_schema_dir)]

# Scale-factor path component, e.g. "sf1000" (assumes exactly one such part).
scale = [x for x in config["data_dir"].split("/") if "sf" in x][0]

# Target Parquet partition size (GB) and CSV read chunk size.
part_size = 2
chunksize = "128 MiB"
# Spark uses different names for column types, and RAPIDS doesn't yet support Decimal types.
# Spark uses different names for column types, and RAPIDS doesn't yet support Decimal types.
def get_schema(table):
    """Parse a table's Spark schema file into cudf-compatible names/dtypes.

    Reads ``{spark_schema_dir}{table}.schema`` where each non-blank line is
    "<column_name> <spark_type>," and maps Spark types to cudf ones:
    bigint -> int, string -> str, decimal(p,s) -> float.

    Parameters
    ----------
    table : str
        Table name (schema file stem).

    Returns
    -------
    tuple[list[str], list[str]]
        Parallel lists of column names and cudf dtype strings.
    """
    with open(f"{spark_schema_dir}{table}.schema") as fp:
        schema = fp.read()
    names = []
    types = []
    for line in schema.split("\n"):
        fields = line.replace(",", "").split()
        # Skip blank lines (e.g. a trailing newline) — previously these
        # raised IndexError on the [0]/[1] field accesses.
        if len(fields) < 2:
            continue
        names.append(fields[0])
        col_type = fields[1].replace("bigint", "int").replace("string", "str")
        # Drop any precision suffix, e.g. "decimal(7,2)" -> "float".
        types.append(col_type.split("(")[0].replace("decimal", "float"))
    return names, types
def read_csv_table(table, chunksize="256 MiB"):
# build dict of dtypes to use when reading CSV
names, types = get_schema(table)
dtype = {names[i]: types[i] for i in range(0, len(names))}
data_dir = config["data_dir"].split('parquet_')[0]
base_path = f"{data_dir}/data/{table}"
files = os.listdir(base_path)
# item_marketprices has "audit" files that should be excluded
if table == "item_marketprices":
paths = [
f"{base_path}/{fn}"
for fn in files
if "audit" not in fn and os.path.getsize(f"{base_path}/{fn}") > 0
]
base_path = f"{data_dir}/data_refresh/{table}"
paths = paths + [
f"{base_path}/{fn}"
for fn in os.listdir(base_path)
if "audit" not in fn and os.path.getsize(f"{base_path}/{fn}") > 0
]
df = dask_cudf.read_csv(
paths, sep="|", names=names, dtype=dtype, chunksize=chunksize, quoting=3
)
else:
paths = [
f"{base_path}/{fn}"
for fn in files
if os.path.getsize(f"{base_path}/{fn}") > 0
]
if table in refresh_tables:
base_path = f"{data_dir}/data_refresh/{table}"
paths = paths + [
f"{base_path}/{fn}"
for fn in os.listdir(base_path)
if os.path.getsize(f"{base_path}/{fn}") > 0
]
df = dask_cudf.read_csv(
paths, sep="|", names=names, dtype=types, chunksize=chunksize, quoting=3
)
return df
def multiplier(unit):
    """Map a `du -sh` size suffix to a GB multiplier.

    "G" -> 1, "T" -> 1000; any other suffix (K, M, ...) -> 0, i.e. tables
    smaller than 1 GB are treated as size 0 by the callers.
    """
    return {"G": 1, "T": 1000}.get(unit, 0)
# we use size of the CSV data on disk to determine number of Parquet partitions
# we use size of the CSV data on disk to determine number of Parquet partitions
def get_size_gb(table):
    """Return a table's on-disk CSV size in GB, rounded up per directory.

    Shells out to `du -sh` and parses the human-readable suffix with
    multiplier(); refresh data is added for tables that have it. Sizes in
    non-G/T units collapse to 0 (see multiplier).
    """
    data_dir = config["data_dir"].split('parquet_')[0]
    path = data_dir + "/data/" + table
    # `du -sh` output looks like "12G\t/path"; take the size token.
    size = subprocess.check_output(["du", "-sh", path]).split()[0].decode("utf-8")
    unit = size[-1]
    size = math.ceil(float(size[:-1])) * multiplier(unit)
    if table in refresh_tables:
        path = data_dir + "/data_refresh/" + table
        refresh_size = (
            subprocess.check_output(["du", "-sh", path]).split()[0].decode("utf-8")
        )
        size = size + math.ceil(float(refresh_size[:-1])) * multiplier(refresh_size[-1])
    return size
def repartition(table, outdir, npartitions=None, chunksize=None, compression="snappy"):
    """Convert one table from CSV to Parquet under `outdir`.

    npartitions defaults to the table's CSV size in GB (i.e. ~1 GB per
    Parquet file); chunksize is forwarded to the CSV reader.
    """
    size = get_size_gb(table)
    if npartitions is None:
        npartitions = max(1, size)
    print(
        f"Converting {table} of {size} GB to {npartitions} parquet files, chunksize: {chunksize}"
    )
    read_csv_table(table, chunksize).repartition(
        npartitions=npartitions
    ).to_parquet(outdir + table, compression=compression, index=False)
def main(client, config):
    """Convert every table's CSV data to Snappy-compressed Parquet.

    Partition counts target ~part_size GB per file, except product_reviews
    which uses ~1 GB partitions. Returns an empty frame because the
    run_query harness expects a DataFrame result.
    """
    # location you want to write Parquet versions of the table data
    data_dir = config["data_dir"].split('parquet_')[0]
    outdir = f"{data_dir}/parquet_{part_size}gb/"
    t0 = time.time()
    for table in tables:
        size_gb = get_size_gb(table)
        # product_reviews has lengthy strings which exceed cudf's max number
        # of characters per column; smaller partitions avoid overflowing
        # this character limit.
        if table == "product_reviews":
            npartitions = max(1, int(size_gb / 1))
        else:
            npartitions = max(1, int(size_gb / part_size))
        repartition(table, outdir, npartitions, chunksize, compression="snappy")
    print(f"Load test with chunk size of {chunksize} took {time.time() - t0:.2f}s")
    return cudf.DataFrame()
if __name__ == "__main__":
    # cudf/dask_cudf are imported here rather than at module top so that the
    # helper functions above can be imported without a GPU environment; the
    # imports still bind module-level names used by read_csv_table/main.
    from bdb_tools.cluster_startup import attach_to_cluster
    import cudf
    import dask_cudf

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q01/gpu_bdb_query_01.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import benchmark, gpubdb_argparser, run_query
from bdb_tools.q01_utils import (
q01_i_category_id_IN,
q01_ss_store_sk_IN,
q01_viewed_together_count,
q01_limit,
read_tables
)
### Implementation Notes:
# `drop_duplicates` and `groupby` by default brings result to single partition
# Have changed `drop_duplicates` behaviour to give `n_workers` partitions
# This can change performance characteristics at larger scales
### Future Notes:
# Setting index + merge using map_partitions can be a work-around if dask native merge is slow
### Inner Self join to get pairs
# Select t1.ss_item_sk as item_sk_1 , t2.ss_item_sk as item_sk_2
# FROM (
# ...
# ) t1 Inner Join
# (
# ...
# ) t2
# ON t1.ss_ticket_number == t2.ss_ticket_number
# Where
# t1.ss_item_sk < t2.ss_item_sk
def get_pairs(
    df,
    col_name="ss_item_sk",
    merge_col="ss_ticket_number",
    pair_col="ss_item_sk",
    output_col_1="item_sk_1",
    output_col_2="item_sk_2",
):
    """Build unordered pairs of `pair_col` values sharing the same `merge_col`.

    Implements the SQL self-join: join the frame with itself on `merge_col`
    and keep rows where the left value is strictly smaller than the right,
    so each unordered pair appears exactly once and self-pairs are dropped.

    NOTE: `col_name` is not used; it is kept for interface compatibility.

    Returns
    -------
    DataFrame with columns [output_col_1, output_col_2].
    """
    left_col = f"{pair_col}_t1"
    right_col = f"{pair_col}_t2"

    joined = df.merge(df, on=merge_col, suffixes=["_t1", "_t2"])
    joined = joined[[left_col, right_col]]

    keep = joined[left_col] < joined[right_col]
    pairs = joined[keep].reset_index(drop=True)

    return pairs.rename(
        columns={left_col: output_col_1, right_col: output_col_2}
    )
def main(client, config):
    """Q1: find item pairs frequently sold together on the same ticket.

    Filters store sales to the configured stores and item categories, builds
    ordered item pairs per ticket, counts pair co-occurrences, and returns the
    top ``q01_limit`` pairs seen more than ``q01_viewed_together_count`` times.
    Inputs are presumably dask_cudf DataFrames (read path lives in
    bdb_tools.q01_utils.read_tables) -- confirm against that module.
    """
    item_df, ss_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # SELECT DISTINCT ss_item_sk,ss_ticket_number
    # FROM store_sales s, item i
    # -- Only products in certain categories sold in specific stores are considered,
    # WHERE s.ss_item_sk = i.i_item_sk
    # AND i.i_category_id IN ({q01_i_category_id_IN})
    # AND s.ss_store_sk IN ({q01_ss_store_sk_IN})
    f_ss_df = ss_df.loc[ss_df["ss_store_sk"].isin(q01_ss_store_sk_IN)][
        ["ss_item_sk", "ss_ticket_number"]
    ].reset_index(drop=True)
    f_item_df = item_df.loc[item_df["i_category_id"].isin(q01_i_category_id_IN)][
        ["i_item_sk"]
    ].reset_index(drop=True)
    ss_item_join = f_item_df.merge(
        f_ss_df, left_on=["i_item_sk"], right_on=["ss_item_sk"]
    )
    ss_item_join = ss_item_join[["ss_item_sk", "ss_ticket_number"]]
    ## keep to a single partitions
    ## We only have 41,910,265 rows in the dataframe at sf-10k and dont need to split_out.
    ss_item_join = ss_item_join.drop_duplicates()
    ### do pair inner join (self-merge on ticket number; see get_pairs)
    pair_df = get_pairs(ss_item_join)
    # SELECT item_sk_1, item_sk_2, COUNT(*) AS cnt
    # FROM
    # (
    #    ...
    # )
    # GROUP BY item_sk_1, item_sk_2
    # -- 'frequently'
    # HAVING cnt > {q01_viewed_together_count}
    # ORDER BY cnt DESC, item_sk_1, item_sk_2
    grouped_df = (
        pair_df.groupby(["item_sk_1", "item_sk_2"])
        .size()
        .reset_index()
        .rename(columns={0: "cnt"})
    )
    grouped_df = grouped_df[grouped_df["cnt"] > q01_viewed_together_count].reset_index(
        drop=True
    )
    ### 2017 rows after filteration at sf-100
    ### should scale till sf-100k
    grouped_df = grouped_df.repartition(npartitions=1).persist()
    ## converting to strings because of issue
    # https://github.com/rapidsai/gpu-bdb/issues/36
    grouped_df["item_sk_1"] = grouped_df["item_sk_1"].astype("str")
    grouped_df["item_sk_2"] = grouped_df["item_sk_2"].astype("str")
    # Single partition, so a partition-local sort is a global sort:
    # count descending, then item ids ascending (as strings, per the issue above).
    grouped_df = grouped_df.map_partitions(
        lambda df: df.sort_values(
            by=["cnt", "item_sk_1", "item_sk_2"], ascending=[False, True, True]
        )
    )
    grouped_df = grouped_df.reset_index(drop=True)
    ### below is just 100 rows so should fit on `cudf` context
    grouped_df = grouped_df.head(q01_limit)
    ### writing to int to ensure same values
    grouped_df["item_sk_1"] = grouped_df["item_sk_1"].astype("int32")
    grouped_df["item_sk_2"] = grouped_df["item_sk_2"].astype("int32")
    return grouped_df


# Standalone entry point: attach to an existing dask cluster and run Q1.
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q01/gpu_bdb_query_01_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q1 -----------
# Comma-separated lists interpolated into the SQL IN (...) clauses below.
q01_i_category_id_IN = "1, 2, 3"
# -- sf1 -> 11 stores, 90k sales in 820k lines
q01_ss_store_sk_IN = "10, 20, 33, 40, 50"
# A pair must co-occur on more than this many tickets to be reported.
q01_viewed_together_count = 50
# Number of top pairs returned.
q01_limit = 100
def read_tables(data_dir, bc):
    """Register the q01 parquet datasets (item, store_sales) with the SQL context."""
    for name in ("item", "store_sales"):
        bc.create_table(name, os.path.join(data_dir, f"{name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Q1 (BlazingSQL variant): frequently co-sold item pairs via two SQL passes."""
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # 1st pass: distinct (item, ticket) pairs restricted to the configured
    # categories and stores.
    query_distinct = f"""
        SELECT DISTINCT ss_item_sk, ss_ticket_number
        FROM store_sales s, item i
        WHERE s.ss_item_sk = i.i_item_sk
        AND i.i_category_id IN ({q01_i_category_id_IN})
        AND s.ss_store_sk IN ({q01_ss_store_sk_IN})
    """
    result_distinct = bc.sql(query_distinct)
    result_distinct = result_distinct.persist()
    # Materialize before registering: the next query self-joins this table.
    wait(result_distinct)
    bc.create_table("distinct_table", result_distinct)
    # 2nd pass: self-join on ticket number, count ordered pairs, keep frequent
    # ones. Ordering casts the ids to VARCHAR to match the dask implementation
    # (see https://github.com/rapidsai/gpu-bdb/issues/36).
    query = f"""
        SELECT item_sk_1, item_sk_2, COUNT(*) AS cnt
        FROM
        (
            SELECT CAST(t1.ss_item_sk as BIGINT) AS item_sk_1,
                CAST(t2.ss_item_sk AS BIGINT) AS item_sk_2
            FROM distinct_table t1
            INNER JOIN distinct_table t2
            ON t1.ss_ticket_number = t2.ss_ticket_number
            WHERE t1.ss_item_sk < t2.ss_item_sk
        )
        GROUP BY item_sk_1, item_sk_2
        HAVING COUNT(*) > {q01_viewed_together_count}
        ORDER BY cnt DESC, CAST(item_sk_1 AS VARCHAR),
            CAST(item_sk_2 AS VARCHAR)
        LIMIT {q01_limit}
    """
    result = bc.sql(query)
    bc.drop_table("distinct_table")
    return result


if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q01/gpu_bdb_query_01_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q01_utils import (
q01_i_category_id_IN,
q01_ss_store_sk_IN,
q01_viewed_together_count,
q01_limit,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Q1 (dask-sql variant): frequently co-sold item pairs via two SQL passes."""
    benchmark(read_tables, config, c)
    # 1st pass: distinct (item, ticket) pairs restricted to the configured
    # categories and stores.
    # NOTE(review): the IN lists are interpolated without parentheses here, so
    # this assumes the q01_utils constants render as parenthesized SQL lists
    # (e.g. tuples) -- confirm against bdb_tools.q01_utils.
    query_distinct = f"""
        SELECT DISTINCT ss_item_sk, ss_ticket_number
        FROM store_sales s, item i
        WHERE s.ss_item_sk = i.i_item_sk
        AND i.i_category_id IN {q01_i_category_id_IN}
        AND s.ss_store_sk IN {q01_ss_store_sk_IN}
    """
    result_distinct = c.sql(query_distinct)
    result_distinct = result_distinct.persist()
    # Materialize before registering: the next query self-joins this table.
    wait(result_distinct)
    c.create_table("distinct_table", result_distinct, persist=False)
    # 2nd pass: self-join on ticket number, count ordered pairs, keep frequent ones.
    query = f"""
        SELECT item_sk_1, item_sk_2, COUNT(*) AS cnt
        FROM
        (
            SELECT CAST(t1.ss_item_sk as BIGINT) AS item_sk_1,
                CAST(t2.ss_item_sk AS BIGINT) AS item_sk_2
            FROM distinct_table t1
            INNER JOIN distinct_table t2
            ON t1.ss_ticket_number = t2.ss_ticket_number
            WHERE t1.ss_item_sk < t2.ss_item_sk
        )
        GROUP BY item_sk_1, item_sk_2
        HAVING COUNT(*) > {q01_viewed_together_count}
        ORDER BY cnt DESC, CAST(item_sk_1 AS VARCHAR),
            CAST(item_sk_2 AS VARCHAR)
        LIMIT {q01_limit}
    """
    result = c.sql(query)
    c.drop_table("distinct_table")
    return result


@annotate("QUERY1", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, run Q1."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q11/README.md | # Query 11
In this query, for a given product, we measure the correlation of sentiments, including the number of reviews and average review ratings, on product monthly revenues within a given time frame.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q11/gpu_bdb_query_11_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register the q11 parquet datasets (web_sales, product_reviews, date_dim)."""
    for name in ("web_sales", "product_reviews", "date_dim"):
        bc.create_table(name, os.path.join(data_dir, f"{name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Q11: correlation between review stats and web sales for items sold in a window.

    Computes the Pearson correlation between each item's review count and
    average rating, restricted to items with at least one web sale in the
    hard-coded date window, and returns it as a single-cell DataFrame.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # p: per-item review count and mean rating.
    # s: items with at least one web sale in [2003-01-02, 2003-02-02].
    query = """
        WITH p AS
        (
            SELECT
                pr_item_sk,
                count(pr_item_sk) AS r_count,
                AVG( CAST(pr_review_rating AS DOUBLE) ) avg_rating
            FROM product_reviews
            WHERE pr_item_sk IS NOT NULL
            GROUP BY pr_item_sk
        ), s AS
        (
            SELECT
                ws_item_sk
            FROM web_sales ws
            INNER JOIN date_dim d ON ws.ws_sold_date_sk = d.d_date_sk
            WHERE ws_item_sk IS NOT null
            AND CAST(d.d_date AS DATE) >= DATE '2003-01-02'
            AND CAST(d.d_date AS DATE) <= DATE '2003-02-02'
            GROUP BY ws_item_sk
        )
        SELECT p.r_count AS x,
            p.avg_rating AS y
        FROM s INNER JOIN p ON p.pr_item_sk = s.ws_item_sk
    """
    result = bc.sql(query)
    # The correlation reduces to one scalar computed on the client.
    sales_corr = result["x"].corr(result["y"]).compute()
    result_df = cudf.DataFrame([sales_corr])
    result_df.columns = ["corr(CAST(reviews_count AS DOUBLE), avg_rating)"]
    return result_df


if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q11/gpu_bdb_query_11.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
convert_datestring_to_days,
)
from bdb_tools.q11_utils import read_tables
import numpy as np
# Q11 parameters: inclusive web-sales date window (ISO dates).
q11_start_date = "2003-01-02"
q11_end_date = "2003-02-02"


def main(client, config):
    """Q11 (dask/cudf variant): correlation of review count vs. average rating.

    Restricts to items with at least one web sale in the configured window and
    returns the Pearson correlation as a single-cell cudf DataFrame.
    """
    pr_df, ws_df, date_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # d_date strings -> integer days since epoch, then range-filter the window.
    date_df = date_df.map_partitions(convert_datestring_to_days)
    # Filter limit in days
    min_date = np.datetime64(q11_start_date, "D").astype(int)
    max_date = np.datetime64(q11_end_date, "D").astype(int)
    date_df = date_df.query(
        "d_date>=@min_date and d_date<=@max_date",
        meta=date_df._meta,
        local_dict={"min_date": min_date, "max_date": max_date},
    )
    web_sales = ws_df.merge(
        date_df, left_on=["ws_sold_date_sk"], right_on=["d_date_sk"], how="inner"
    )
    # Only take sales that are not null, and get the unique item SKUs
    # Note that we do not need the revenue column, so we don't need a groupby aggregation
    # Spark *possibly* does this optimization under the hood
    web_sales = web_sales[web_sales["ws_item_sk"].notnull()].reset_index(drop=True)
    web_sales = web_sales.ws_item_sk.unique().to_frame()
    # temporarily avoid reset_index due to a MultiColumn bug
    product_reviews = pr_df[pr_df["pr_item_sk"].notnull()].reset_index(drop=True)
    # Per-item review count and mean rating.
    product_reviews = product_reviews.groupby("pr_item_sk").agg(
        {"pr_review_rating": ["count", "mean"]}
    )
    product_reviews.columns = ["r_count", "avg_rating"]
    # temporarily avoid reset_index due to a MultiColumn bug
    sales = web_sales.merge(
        product_reviews, left_on=["ws_item_sk"], right_index=True, how="inner"
    )
    sales = sales.rename(
        columns={
            "ws_item_sk": "pid",
            "r_count": "reviews_count",
            "revenue": "m_revenue",
        }
    ).reset_index()
    # this is a scalar so can remain a cudf frame
    sales_corr = sales["reviews_count"].corr(sales["avg_rating"])
    sales_corr = sales_corr.persist()
    sales_corr = sales_corr.compute()
    result_df = cudf.DataFrame([sales_corr])
    result_df.columns = ["corr(CAST(reviews_count AS DOUBLE), avg_rating)"]
    return result_df


if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q11/gpu_bdb_query_11_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import cudf
import dask_cudf
import pandas as pd
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q11_utils import read_tables
def main(data_dir, client, c, config):
    """Q11 (dask-sql variant): correlation of review count vs. average rating."""
    benchmark(read_tables, config, c)
    # p: per-item review count and mean rating.
    # s: items with at least one web sale in [2003-01-02, 2003-02-02].
    query = """
        WITH p AS
        (
            SELECT
                pr_item_sk,
                count(pr_item_sk) AS r_count,
                AVG( CAST(pr_review_rating AS DOUBLE) ) avg_rating
            FROM product_reviews
            WHERE pr_item_sk IS NOT NULL
            GROUP BY pr_item_sk
        ), s AS
        (
            SELECT
                ws_item_sk
            FROM web_sales ws
            INNER JOIN date_dim d ON ws.ws_sold_date_sk = d.d_date_sk
            WHERE ws_item_sk IS NOT null
            AND CAST(d.d_date AS DATE) >= DATE '2003-01-02'
            AND CAST(d.d_date AS DATE) <= DATE '2003-02-02'
            GROUP BY ws_item_sk
        )
        SELECT p.r_count AS x,
            p.avg_rating AS y
        FROM s INNER JOIN p ON p.pr_item_sk = s.ws_item_sk
    """
    result = c.sql(query)
    sales_corr = result["x"].corr(result["y"]).compute()
    # Mirror the backend of the SQL result: cudf on GPU, pandas otherwise.
    if isinstance(result, dask_cudf.DataFrame):
        result_df = cudf.DataFrame([sales_corr])
    else:
        result_df = pd.DataFrame([sales_corr])
    result_df.columns = ["corr(CAST(reviews_count AS DOUBLE), avg_rating)"]
    return result_df


@annotate("QUERY11", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, run Q11."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q15/gpu_bdb_query_15_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# -------- Q15 -----------
# --store_sales date range
q15_startDate = "2001-09-02"
# --+1year
q15_endDate = "2002-09-02"
# Single store whose revenue trend is analyzed.
q15_store_sk = 10
def read_tables(data_dir, bc):
    """Register the q15 parquet datasets (store_sales, date_dim, item)."""
    for name in ("store_sales", "date_dim", "item"):
        bc.create_table(name, os.path.join(data_dir, f"{name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Q15 (BlazingSQL variant): per-category OLS regression of daily revenue.

    The inner query aggregates daily revenue per category (x = sale day,
    y = revenue; xy/xx feed the least-squares sums); the outer query derives
    slope and intercept and keeps categories with a non-increasing trend.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = f"""
        SELECT *
        FROM
        (
            SELECT
                cat,
                ( (count(x) * SUM(xy) - SUM(x) * SUM(y)) / (count(x) * SUM(xx) - SUM(x) * SUM(x)) ) AS slope,
                (SUM(y) - ((count(x) * SUM(xy) - SUM(x) * SUM(y)) / (count(x) * SUM(xx) - SUM(x)*SUM(x)) ) * SUM(x)) / count(x) AS intercept
            FROM
            (
                SELECT
                    i.i_category_id AS cat,
                    s.ss_sold_date_sk AS x,
                    CAST(SUM(s.ss_net_paid) AS DOUBLE) AS y,
                    CAST(s.ss_sold_date_sk * SUM(s.ss_net_paid) AS DOUBLE) AS xy,
                    CAST(s.ss_sold_date_sk * s.ss_sold_date_sk AS DOUBLE) AS xx
                FROM store_sales s
                INNER JOIN item i ON s.ss_item_sk = i.i_item_sk
                INNER JOIN date_dim d ON s.ss_sold_date_sk = d.d_date_sk
                WHERE s.ss_store_sk = {q15_store_sk}
                AND i.i_category_id IS NOT NULL
                AND CAST(d.d_date AS DATE) >= DATE '{q15_startDate}'
                AND CAST(d.d_date AS DATE) <= DATE '{q15_endDate}'
                GROUP BY i.i_category_id, s.ss_sold_date_sk
            ) temp
            GROUP BY cat
        ) regression
        WHERE slope <= 0.0
        ORDER BY cat
    """
    result = bc.sql(query)
    return result


if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q15/gpu_bdb_query_15_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q15_utils import (
q15_startDate,
q15_endDate,
q15_store_sk,
read_tables
)
def main(data_dir, client, c, config):
    """Q15 (dask-sql variant): per-category OLS regression of daily revenue.

    The inner query aggregates daily revenue per category (x = sale day,
    y = revenue; xy/xx feed the least-squares sums); the outer query derives
    slope and intercept and keeps categories with a non-increasing trend.
    """
    benchmark(read_tables, config, c)
    query = f"""
        SELECT *
        FROM
        (
            SELECT
                cat,
                ( (count(x) * SUM(xy) - SUM(x) * SUM(y)) / (count(x) * SUM(xx) - SUM(x) * SUM(x)) ) AS slope,
                (SUM(y) - ((count(x) * SUM(xy) - SUM(x) * SUM(y)) / (count(x) * SUM(xx) - SUM(x)*SUM(x)) ) * SUM(x)) / count(x) AS intercept
            FROM
            (
                SELECT
                    i.i_category_id AS cat,
                    s.ss_sold_date_sk AS x,
                    CAST(SUM(s.ss_net_paid) AS DOUBLE) AS y,
                    CAST(s.ss_sold_date_sk * SUM(s.ss_net_paid) AS DOUBLE) AS xy,
                    CAST(s.ss_sold_date_sk * s.ss_sold_date_sk AS DOUBLE) AS xx
                FROM store_sales s
                INNER JOIN item i ON s.ss_item_sk = i.i_item_sk
                INNER JOIN date_dim d ON s.ss_sold_date_sk = d.d_date_sk
                WHERE s.ss_store_sk = {q15_store_sk}
                AND i.i_category_id IS NOT NULL
                AND CAST(d.d_date AS DATE) >= DATE '{q15_startDate}'
                AND CAST(d.d_date AS DATE) <= DATE '{q15_endDate}'
                GROUP BY i.i_category_id, s.ss_sold_date_sk
            ) temp
            GROUP BY cat
        ) regression
        WHERE slope <= 0.0
        ORDER BY cat
    """
    result = c.sql(query)
    return result


@annotate("QUERY15", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, run Q15."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q15/gpu_bdb_query_15.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
convert_datestring_to_days,
)
from bdb_tools.q15_utils import (
q15_startDate,
q15_endDate,
q15_store_sk,
store_sales_cols,
read_tables
)
import datetime
def _to_epoch_days(date_str):
    """Convert an ISO ``YYYY-MM-DD`` string to whole days since the Unix epoch."""
    dt = datetime.datetime.strptime(date_str, "%Y-%m-%d")
    return int((dt - datetime.datetime(1970, 1, 1)) / datetime.timedelta(days=1))


def main(client, config):
    """Q15 (dask/cudf variant): per-category OLS regression of daily revenue.

    Returns categories whose fitted revenue trend over time is non-increasing
    (slope <= 0), with the fitted slope and intercept, ordered by category,
    for a single store within a one-year window.
    """
    store_sales_df, date_dim_df, item_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ### Query 0. Filtering store sales to the single store under analysis
    store_sales_df = store_sales_df.query(f"ss_store_sk == {q15_store_sk}")
    ### Query 1. Date filtering: d_date strings -> epoch days, then range filter
    date_dim_cov_df = date_dim_df.map_partitions(convert_datestring_to_days)
    q15_start_dt = _to_epoch_days(q15_startDate)
    q15_end_dt = _to_epoch_days(q15_endDate)
    filtered_date_df = date_dim_cov_df.query(
        f"d_date >={q15_start_dt} and d_date <= {q15_end_dt}",
        meta=date_dim_cov_df._meta,
    ).reset_index(drop=True)
    store_sales_df = store_sales_df.merge(
        filtered_date_df,
        left_on=["ss_sold_date_sk"],
        right_on=["d_date_sk"],
        how="inner",
    )
    store_sales_df = store_sales_df[store_sales_cols]
    #### Query 2. `store_sales_df` inner join `item`
    item_df = item_df[item_df["i_category_id"].notnull()].reset_index(drop=True)
    store_sales_item_join = store_sales_df.merge(
        item_df, left_on=["ss_item_sk"], right_on=["i_item_sk"], how="inner"
    )
    group_cols = ["i_category_id", "ss_sold_date_sk"]
    agg_cols = ["ss_net_paid"]
    agg_df = (
        store_sales_item_join[agg_cols + group_cols]
        .groupby(group_cols)
        .agg({"ss_net_paid": "sum"})
    )
    ### The number of categories is known to be limited, so it is safe to
    ### bring the aggregate down to a local frame.
    agg_df = agg_df.compute()
    agg_df = agg_df.reset_index(drop=False)
    # x = sale day, y = daily revenue; xy and xx feed the least-squares sums.
    agg_df = agg_df.rename(
        columns={"i_category_id": "cat", "ss_sold_date_sk": "x", "ss_net_paid": "y"}
    )
    agg_df["xy"] = agg_df["x"] * agg_df["y"]
    agg_df["xx"] = agg_df["x"] * agg_df["x"]
    ### Query 3. Group By Logic
    # Ideally we should use `as_index=False` and have a simplified rename call.
    # as_index=False doesn't work here: https://github.com/rapidsai/cudf/issues/3737
    regression_groups = agg_df.groupby(["cat"]).agg(
        {"x": ["count", "sum"], "xx": ["sum"], "xy": ["sum"], "y": ["count", "sum"]}
    )
    regression_groups.columns = regression_groups.columns.map(
        {
            ("x", "count"): "count_x",
            ("x", "sum"): "sum_x",
            ("xx", "sum"): "sum_xx",
            ("xy", "sum"): "sum_xy",
            ("y", "count"): "count_y",
            ("y", "sum"): "sum_y",
        }
    )
    # Ordinary least squares: slope = (n*Sxy - Sx*Sy) / (n*Sxx - Sx*Sx)
    regression_groups["slope"] = (
        regression_groups["count_x"] * regression_groups["sum_xy"]
        - regression_groups["sum_x"] * regression_groups["sum_y"]
    ) / (
        (
            regression_groups["count_x"] * regression_groups["sum_xx"]
            - regression_groups["sum_x"] * regression_groups["sum_x"]
        )
    )
    ### Applying Regression Formula: intercept = (Sy - slope*Sx) / n
    regression_groups["intercept"] = (
        regression_groups["sum_y"]
        - regression_groups["slope"] * regression_groups["sum_x"]
    )
    regression_groups["intercept"] = (
        regression_groups["intercept"] / regression_groups["count_x"]
    )
    # Keep non-increasing trends. Use <= 0 to match the reference SQL
    # implementations (`WHERE slope <= 0.0`); the previous strict `< 0`
    # silently dropped zero-slope categories.
    regression_groups = regression_groups[regression_groups["slope"] <= 0]
    regression_groups = regression_groups.reset_index(drop=False)
    regression_groups = regression_groups[["cat", "slope", "intercept"]].sort_values(
        by=["cat"]
    )
    return regression_groups
# Standalone entry point: attach to an existing dask cluster and run Q15.
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q28/gpu_bdb_query_28.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy
import cupy as cp
import copyreg
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q28_utils import (
post_etl_processing,
read_tables
)
def main(client, config):
    """Q28: sentiment classification of product reviews.

    Drops reviews with null content, makes a random 90/10 train/test split and
    delegates vectorization, training and evaluation to
    bdb_tools.q28_utils.post_etl_processing. Returns a payload dict with the
    predictions frame plus accuracy, precision and confusion-matrix metrics.
    """
    product_reviews_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    product_reviews_df = product_reviews_df[
        product_reviews_df["pr_review_content"].notnull()
    ]
    # 90% train/test split
    train_data, test_data = product_reviews_df.random_split([0.9, 0.10])
    train_data = train_data.reset_index(drop=True)
    test_data = test_data.reset_index(drop=True)
    # Free the parent frame before the memory-heavy ML stage.
    del product_reviews_df
    final_data, acc, prec, cmat = post_etl_processing(
        client=client, train_data=train_data, test_data=test_data
    )
    # "supervised" routes the payload through the benchmark's
    # supervised-learning result writer (predictions + metrics).
    payload = {
        "df": final_data,
        "acc": acc,
        "prec": prec,
        "cmat": cmat,
        "output_type": "supervised",
    }
    return payload


def register_serialization():
    """Register a pickle reducer for cupy cusparse MatDescriptor objects."""

    def serialize_mat_descriptor(m):
        # NOTE(review): `cp` is already the cupy module, so `cp.cupy.cusparse`
        # looks like a doubled attribute path -- confirm it resolves on the
        # pinned cupy version. Also note nothing in this file calls
        # register_serialization().
        return cp.cupy.cusparse.MatDescriptor.create, ()

    copyreg.pickle(cp.cupy.cusparse.MatDescriptor, serialize_mat_descriptor)


if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster

    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q28/README.md | # Query 28
In this query, we train a text classifier for online review sentiment classification (Positive, Negative, Neutral), using 90% of available reviews for training and the remaining 10% for testing.
The classifier accuracy on testing data and classification result for the 10% testing data is displayed as follows:
```
<reviewSK>, <originalRating>, <classificationResult>
```
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q28/gpu_bdb_query_28_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q28_utils import (
post_etl_processing,
read_tables
)
def main(data_dir, client, c, config):
    """Q28 (dask-sql variant): review sentiment classification.

    Splits reviews deterministically by ``mod(pr_review_sk, 10)`` -- remainder
    0 becomes the 10% test set, remainders 1-9 the 90% training set -- then
    delegates to bdb_tools.q28_utils.post_etl_processing.
    """
    benchmark(read_tables, config, c)
    # 10 % of data
    query1 = """
        SELECT
            pr_review_sk,
            pr_review_rating,
            pr_review_content
        FROM product_reviews
        WHERE mod(pr_review_sk, 10) IN (0)
        AND pr_review_content IS NOT NULL
        ORDER BY pr_review_sk
    """
    test_data = c.sql(query1)
    # 90 % of data
    query2 = """
        SELECT
            pr_review_sk,
            pr_review_rating,
            pr_review_content
        FROM product_reviews
        WHERE mod(pr_review_sk, 10) IN (1,2,3,4,5,6,7,8,9)
        AND pr_review_content IS NOT NULL
        ORDER BY pr_review_sk
    """
    train_data = c.sql(query2)
    final_data, acc, prec, cmat = post_etl_processing(
        client=client, train_data=train_data, test_data=test_data
    )
    # "supervised" routes the payload through the benchmark's
    # supervised-learning result writer (predictions + metrics).
    payload = {
        "df": final_data,
        "acc": acc,
        "prec": prec,
        "cmat": cmat,
        "output_type": "supervised",
    }
    return payload


@annotate("QUERY28", color="green", domain="gpu-bdb")
def start_run():
    """Parse CLI config, attach to the cluster with a dask-sql context, run Q28."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)


if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q28/gpu_bdb_query_28_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
import time
from bdb_tools.cluster_startup import attach_to_cluster
from cuml.feature_extraction.text import HashingVectorizer
import cupy
import dask
from distributed import wait
import cupy as cp
import numpy as np
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# Dimensionality of the hashed feature space; a larger space reduces collisions.
N_FEATURES = 2 ** 23  # Spark is doing 2^20
# Use unigrams and bigrams.
ngram_range = (1, 2)
# Lower-case the text column before hashing.
preprocessor = lambda s:s.str.lower()
# Raw term counts: no normalization, no alternating hash signs.
norm = None
alternate_sign = False
def gpu_hashing_vectorizer(x):
    """Hash-vectorize one partition of review text into a sparse count matrix.

    Uses the module-level hashing configuration (N_FEATURES, alternate_sign,
    ngram_range, norm, preprocessor).
    """
    vec = HashingVectorizer(n_features=N_FEATURES,
                            alternate_sign=alternate_sign,
                            ngram_range=ngram_range,
                            norm=norm,
                            preprocessor=preprocessor
                            )
    return vec.fit_transform(x)
def map_labels(ser):
    """Map star ratings to sentiment classes: {1,2} -> 0, {3} -> 1, else -> 2.

    Any rating not equal to 1, 2 or 3 (e.g. 4 and 5) keeps the default
    class 2 set below.
    """
    import cudf
    # Default every row to class 2.
    # NOTE(review): cudf.core.column.full is internal cudf API -- may break on
    # upgrade; confirm against the pinned cudf version.
    output_ser = cudf.Series(cudf.core.column.full(size=len(ser), fill_value=2, dtype=np.int32))
    zero_flag = (ser==1) | (ser==2)
    output_ser.loc[zero_flag]=0
    three_flag = (ser==3)
    output_ser.loc[three_flag]=1
    return output_ser
def build_features(t):
    """Vectorize the pr_review_content column of *t* into a dask array.

    Each partition is hashed by gpu_hashing_vectorizer; the result is cast to
    float32, persisted, and its chunk sizes are computed so downstream
    estimators can index it.
    """
    X = t["pr_review_content"]
    X = X.map_partitions(
        gpu_hashing_vectorizer,
        # meta: a 1-element sparse CSR block describing the output type.
        meta=dask.array.from_array(
            cupy.sparse.csr_matrix(cupy.zeros(1, dtype=cp.float32))
        ),
    )
    X = X.astype(np.float32).persist()
    X.compute_chunk_sizes()
    return X
def build_labels(reviews_df):
    """Build the int32 sentiment label array from pr_review_rating (see map_labels)."""
    y = reviews_df["pr_review_rating"].map_partitions(map_labels)
    y = y.map_partitions(lambda x: cupy.asarray(x, cupy.int32)).persist()
    y.compute_chunk_sizes()
    return y
def categoricalize(num_sr):
    """Render numeric sentiment classes 0/1/2 as 'NEG'/'NEUT'/'POS' strings."""
    return num_sr.astype("str").str.replace(["0", "1", "2"], ["NEG", "NEUT", "POS"])
def sum_tp_fp(y_y_pred, nclasses):
    """Count per-class true/false positives for one data partition.

    Parameters
    ----------
    y_y_pred : tuple of (y, y_pred) label arrays for this partition.
    nclasses : int -- number of classes; labels are assumed to lie in
        0..nclasses-1.

    Returns
    -------
    An (nclasses, 2) array: column 0 holds the true-positive count and
    column 1 the false-positive count for each class.
    """
    y, y_pred = y_y_pred
    res = cp.zeros((nclasses, 2), order="F")
    for i in range(nclasses):
        pos_pred_ix = cp.where(y_pred == i)[0]
        if len(pos_pred_ix) == 0:
            # No predictions for this class: its tp/fp stay zero. Bug fix:
            # use `continue` rather than `break`, which aborted the loop and
            # left every later class incorrectly zeroed.
            continue
        tp_sum = (y_pred[pos_pred_ix] == y[pos_pred_ix]).sum()
        fp_sum = (y_pred[pos_pred_ix] != y[pos_pred_ix]).sum()
        res[i][0] = tp_sum
        res[i][1] = fp_sum
    return res
def precision_score(client, y, y_pred, average="binary"):
    """Distributed precision over dask arrays of class labels.

    Workers count per-class true/false positives on their partitions via
    ``sum_tp_fp``; the client sums them and reduces according to ``average``:
    'binary' (precision of the last class), 'macro' (unweighted per-class
    mean), otherwise micro/global precision. Returns a Python float.

    Raises
    ------
    ValueError -- for average='binary' with more than two classes, or when
    fewer than two classes are present.
    """
    from cuml.dask.common.input_utils import DistributedDataHandler

    nclasses = len(cp.unique(y.map_blocks(lambda x: cp.unique(x)).compute()))
    if average == "binary" and nclasses > 2:
        raise ValueError
    if nclasses < 2:
        raise ValueError("Single class precision is not yet supported")
    ddh = DistributedDataHandler.create([y, y_pred])
    precision_scores = client.compute(
        [
            client.submit(sum_tp_fp, part, nclasses, workers=[worker])
            for worker, part in ddh.gpu_futures
        ],
        sync=True,
    )
    # Sum the per-partition (tp, fp) counts into one (nclasses, 2) array.
    res = cp.zeros((nclasses, 2), order="F")
    for i in precision_scores:
        res += i
    if average == "binary" or average == "macro":
        prec = cp.zeros(nclasses)
        for i in range(nclasses):
            tp_sum, fp_sum = res[i]
            prec[i] = (tp_sum / (tp_sum + fp_sum)).item()
        if average == "binary":
            return prec[nclasses - 1].item()
        else:
            return prec.mean().item()
    else:
        global_tp = cp.sum(res[:, 0])
        global_fp = cp.sum(res[:, 1])
        # Bug fix: `.item()` previously bound only to the denominator sum, so
        # this branch returned a 0-d device array instead of a Python float
        # like the binary/macro branches do.
        return (global_tp / (global_tp + global_fp)).item()
def local_cm(y_y_pred, unique_labels, sample_weight):
    """Build the confusion matrix for a single data partition.

    Parameters
    ----------
    y_y_pred : tuple of (array, array)
        Ground-truth and predicted labels for this partition.
    unique_labels : array
        Unique class labels; assumed dense and monotonically increasing,
        since labels are used directly as matrix indices.
    sample_weight : array-like or None
        Optional per-sample weights; defaults to a weight of 1 per sample.

    Returns
    -------
    cupy.ndarray of shape (n_labels, n_labels)
        Rows indexed by true label, columns by predicted label.
    """
    y_true, y_pred = y_y_pred
    labels = unique_labels
    n_labels = labels.size
    # Assume labels are monotonically increasing for now.
    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    if sample_weight is None:
        sample_weight = cp.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = cp.asarray(sample_weight)
        sample_weight = sample_weight[ind]
    # Accumulate weights at (true, pred) coordinates; duplicate coordinates
    # are summed by the COO -> dense conversion.
    cm = cp.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels), dtype=cp.float32,
    ).toarray()
    return cp.nan_to_num(cm)
def confusion_matrix(client, y_true, y_pred, normalize=None, sample_weight=None):
    """Distributed confusion matrix over dask arrays of class labels.

    Per-worker partial matrices are computed with ``local_cm`` and summed
    on the client.

    Parameters
    ----------
    client : distributed.Client
    y_true, y_pred : dask arrays of integer class labels (GPU-backed).
    normalize : {"true", "pred", "all", None}
        Normalize counts over rows, columns, or the whole matrix.
    sample_weight : array-like or None
        Optional per-sample weights forwarded to ``local_cm``.

    Returns
    -------
    cupy.ndarray of shape (nclasses, nclasses)
    """
    from cuml.dask.common.input_utils import DistributedDataHandler
    unique_classes = cp.unique(y_true.map_blocks(lambda x: cp.unique(x)).compute())
    nclasses = len(unique_classes)
    ddh = DistributedDataHandler.create([y_true, y_pred])
    cms = client.compute(
        [
            client.submit(
                local_cm, part, unique_classes, sample_weight, workers=[worker]
            )
            for worker, part in ddh.gpu_futures
        ],
        sync=True,
    )
    cm = cp.zeros((nclasses, nclasses))
    for i in cms:
        cm += i
    # Normalization may divide by zero; suppress the warnings and replace
    # resulting NaNs with 0.
    with np.errstate(all="ignore"):
        if normalize == "true":
            cm = cm / cm.sum(axis=1, keepdims=True)
        elif normalize == "pred":
            cm = cm / cm.sum(axis=0, keepdims=True)
        elif normalize == "all":
            cm = cm / cm.sum()
        cm = cp.nan_to_num(cm)
    return cm
def accuracy_score(client, y, y_hat):
    """Distributed accuracy: fraction of predictions equal to the labels.

    Each worker counts its correct predictions; the totals are summed on
    the client and divided by the overall sample count.
    """
    from uuid import uuid1
    from cuml.dask.common.input_utils import DistributedDataHandler
    ddh = DistributedDataHandler.create([y_hat, y])
    def _count_accurate_predictions(y_hat_y):
        # Number of positions where prediction and label agree, computed
        # as total minus the count of nonzero differences.
        y_hat, y = y_hat_y
        y_hat = cp.asarray(y_hat, dtype=y_hat.dtype)
        y = cp.asarray(y, dtype=y.dtype)
        return y.shape[0] - cp.count_nonzero(y - y_hat)
    # Unique key prefix so repeated calls don't collide in the scheduler.
    key = uuid1()
    futures = client.compute(
        [
            client.submit(
                _count_accurate_predictions,
                worker_future[1],
                workers=[worker_future[0]],
                key="%s-%s" % (key, idx),
            )
            for idx, worker_future in enumerate(ddh.gpu_futures)
        ],
        sync=True,
    )
    return sum(futures) / y.shape[0]
def post_etl_processing(client, train_data, test_data):
    """Train a distributed multinomial naive Bayes sentiment model.

    Vectorizes train/test review text, fits the model, computes
    accuracy/precision/confusion-matrix metrics, and attaches the string
    predictions back onto ``test_data``.

    Returns
    -------
    tuple
        (final_data, acc, prec, cmat): predictions dataframe sorted by
        review key, plus the three metric values.
    """
    import cudf
    from cuml.dask.naive_bayes import MultinomialNB as DistMNB
    from cuml.dask.common import to_dask_cudf
    from cuml.dask.common.input_utils import DistributedDataHandler
    # Feature engineering
    X_train = build_features(train_data)
    X_test = build_features(test_data)
    y_train = build_labels(train_data)
    y_test = build_labels(test_data)
    # Perform ML
    model = DistMNB(client=client, alpha=0.001)
    model.fit(X_train, y_train)
    ### this regression seems to be coming from here
    # NOTE(review): test_pred_st / metric_et are captured but never used;
    # presumably left over from timing instrumentation.
    test_pred_st = time.time()
    y_hat = model.predict(X_test).persist()
    # Compute distributed performance metrics
    acc = accuracy_score(client, y_test, y_hat)
    print("Accuracy: " + str(acc))
    prec = precision_score(client, y_test, y_hat, average="macro")
    print("Precision: " + str(prec))
    cmat = confusion_matrix(client, y_test, y_hat)
    print("Confusion Matrix: " + str(cmat))
    metric_et = time.time()
    # Place results back in original Dataframe
    ddh = DistributedDataHandler.create(y_hat)
    test_preds = to_dask_cudf(
        [client.submit(cudf.Series, part) for w, part in ddh.gpu_futures]
    )
    # Convert numeric labels to NEG/NEUT/POS strings.
    test_preds = test_preds.map_partitions(categoricalize)
    test_data["prediction"] = test_preds
    final_data = test_data[["pr_review_sk", "pr_review_rating", "prediction"]].persist()
    final_data = final_data.sort_values("pr_review_sk").reset_index(drop=True)
    wait(final_data)
    return final_data, acc, prec, cmat
def read_tables(data_dir, bc):
    """Register the product_reviews parquet dataset with the SQL context."""
    table_name = "product_reviews"
    bc.create_table(table_name, os.path.join(data_dir, table_name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Run the sentiment-classification query end to end.

    Splits product reviews into a 10% test / 90% train partition by
    ``pr_review_sk`` modulo 10, trains the distributed NB model via
    ``post_etl_processing``, and returns the predictions plus metrics.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # 10 % of data
    query1 = """
        SELECT
            pr_review_sk,
            pr_review_rating,
            pr_review_content
        FROM product_reviews
        WHERE mod(pr_review_sk, 10) IN (0)
        AND pr_review_content IS NOT NULL
        ORDER BY pr_review_sk
    """
    test_data = bc.sql(query1)
    # 90 % of data
    query2 = """
        SELECT
            pr_review_sk,
            pr_review_rating,
            pr_review_content
        FROM product_reviews
        WHERE mod(pr_review_sk, 10) IN (1,2,3,4,5,6,7,8,9)
        AND pr_review_content IS NOT NULL
        ORDER BY pr_review_sk
    """
    train_data = bc.sql(query2)
    final_data, acc, prec, cmat = post_etl_processing(
        client=client, train_data=train_data, test_data=test_data
    )
    # "supervised" output_type tells the result validator to also check
    # the attached metrics.
    payload = {
        "df": final_data,
        "acc": acc,
        "prec": prec,
        "cmat": cmat,
        "output_type": "supervised",
    }
    return payload
# Script entry point: parse CLI args, attach to the dask cluster with a
# BlazingSQL context, and execute the query through the benchmark harness.
if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q14/gpu_bdb_query_14.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q14_utils import read_tables
def main(client, config):
    """Run gpu-bdb query 14 with the dataframe (non-SQL) implementation.

    Computes the ratio of web sales made in the morning (7-8am) to those
    made in the evening (7-8pm) for households with 5 dependents browsing
    pages with 5000-6000 characters. Returns a one-row dataframe with
    ``am_pm_ratio`` (-1.0 if the ratio is infinite).
    """
    q14_dependents = 5
    q14_morning_startHour = 7
    q14_morning_endHour = 8
    q14_evening_startHour = 19
    q14_evening_endHour = 20
    q14_content_len_min = 5000
    q14_content_len_max = 6000
    web_sales, household_demographics, web_page, time_dim = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    # Keep only households with exactly q14_dependents dependents.
    household_demographics = household_demographics.query(
        "hd_dep_count==@q14_dependents",
        meta=household_demographics._meta,
        local_dict={"q14_dependents": q14_dependents},
    ).reset_index(drop=True)
    output_table = web_sales.merge(
        household_demographics,
        left_on=["ws_ship_hdemo_sk"],
        right_on=["hd_demo_sk"],
        how="inner",
    )
    output_table = output_table.drop(
        columns=["ws_ship_hdemo_sk", "hd_demo_sk", "hd_dep_count"]
    )
    # Keep only pages whose character count is in the configured range.
    web_page = web_page.query(
        "wp_char_count>=@q14_content_len_min and wp_char_count<=@q14_content_len_max",
        meta=web_page._meta,
        local_dict={
            "q14_content_len_min": q14_content_len_min,
            "q14_content_len_max": q14_content_len_max,
        },
    ).reset_index(drop=True)
    output_table = output_table.merge(
        web_page, left_on=["ws_web_page_sk"], right_on=["wp_web_page_sk"], how="inner"
    )
    output_table = output_table.drop(
        columns=["ws_web_page_sk", "wp_web_page_sk", "wp_char_count"]
    )
    # Keep only the four hours of interest (morning and evening windows).
    time_dim = time_dim.query(
        "t_hour==@q14_morning_startHour or t_hour==@q14_morning_endHour or t_hour==@q14_evening_startHour or t_hour==@q14_evening_endHour",
        meta=time_dim._meta,
        local_dict={
            "q14_morning_startHour": q14_morning_startHour,
            "q14_morning_endHour": q14_morning_endHour,
            "q14_evening_startHour": q14_evening_startHour,
            "q14_evening_endHour": q14_evening_endHour,
        },
    )
    output_table = output_table.merge(
        time_dim, left_on=["ws_sold_time_sk"], right_on=["t_time_sk"], how="inner"
    )
    output_table = output_table.drop(columns=["ws_sold_time_sk", "t_time_sk"])
    # NOTE(review): `.reset_index(drop=True)` binds only to the second
    # comparison, not to the result of `&` — presumably intended for the
    # combined mask; confirm index alignment is unaffected.
    output_table["am"] = (output_table["t_hour"] >= q14_morning_startHour) & (
        output_table["t_hour"] <= q14_morning_endHour
    ).reset_index(drop=True)
    output_table["pm"] = (output_table["t_hour"] >= q14_evening_startHour) & (
        output_table["t_hour"] <= q14_evening_endHour
    ).reset_index(drop=True)
    am_pm_ratio = output_table["am"].sum() / output_table["pm"].sum()
    # result is a scalar
    am_pm_ratio = am_pm_ratio.persist()
    result = am_pm_ratio.compute()
    # Zero evening sales would yield +inf; the benchmark defines -1.0 then.
    if np.isinf(result):
        result = -1.0
    print(result)
    # result is a scalar
    result_df = cudf.DataFrame({"am_pm_ratio": result})
    return result_df
# Script entry point: attach to the dask cluster and run query 14.
if __name__ == "__main__":
    from bdb_tools.cluster_startup import attach_to_cluster
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config)
    run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q14/gpu_bdb_query_14_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q14_utils import read_tables
def main(data_dir, client, c, config):
    """Run gpu-bdb query 14 via dask-sql.

    Single SQL statement computing the morning (7-8am) to evening (7-8pm)
    web-sales ratio for 5-dependent households on 5000-6000 character
    pages; returns -1.0 when there are no evening sales.
    """
    benchmark(read_tables, config, c)
    query = """
        SELECT CASE WHEN pmc > 0.0 THEN CAST (amc AS DOUBLE) / CAST (pmc AS DOUBLE) ELSE -1.0 END AS am_pm_ratio
        FROM
        (
            SELECT SUM(amc1) AS amc, SUM(pmc1) AS pmc
            FROM
            (
                SELECT
                    CASE WHEN t_hour BETWEEN 7 AND 8 THEN COUNT(1) ELSE 0 END AS amc1,
                    CASE WHEN t_hour BETWEEN 19 AND 20 THEN COUNT(1) ELSE 0 END AS pmc1
                FROM web_sales ws
                JOIN household_demographics hd ON (hd.hd_demo_sk = ws.ws_ship_hdemo_sk and hd.hd_dep_count = 5)
                JOIN web_page wp ON (wp.wp_web_page_sk = ws.ws_web_page_sk and wp.wp_char_count BETWEEN 5000 AND 6000)
                JOIN time_dim td ON (td.t_time_sk = ws.ws_sold_time_sk and td.t_hour IN (7,8,19,20))
                GROUP BY t_hour
            ) cnt_am_pm
        ) sum_am_pm
    """
    result = c.sql(query)
    return result
@annotate("QUERY14", color="green", domain="gpu-bdb")
def start_run():
    """Parse args, attach to the cluster with a dask-sql context, run q14."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
# Script entry point.
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q14/gpu_bdb_query_14_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from blazingsql import BlazingContext
from bdb_tools.cluster_startup import attach_to_cluster
from dask_cuda import LocalCUDACluster
from dask.distributed import Client
import os
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
def read_tables(data_dir, bc):
    """Register the four q14 parquet input tables with the SQL context."""
    table_names = ("household_demographics", "web_page", "web_sales", "time_dim")
    for name in table_names:
        bc.create_table(name, os.path.join(data_dir, name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 14 via BlazingSQL.

    Single SQL statement computing the morning (7-8am) to evening (7-8pm)
    web-sales ratio for 5-dependent households on 5000-6000 character
    pages; returns -1.0 when there are no evening sales.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    query = """
        SELECT CASE WHEN pmc > 0.0 THEN CAST (amc AS DOUBLE) / CAST (pmc AS DOUBLE) ELSE -1.0 END AS am_pm_ratio
        FROM
        (
            SELECT SUM(amc1) AS amc, SUM(pmc1) AS pmc
            FROM
            (
                SELECT
                    CASE WHEN t_hour BETWEEN 7 AND 8 THEN COUNT(1) ELSE 0 END AS amc1,
                    CASE WHEN t_hour BETWEEN 19 AND 20 THEN COUNT(1) ELSE 0 END AS pmc1
                FROM web_sales ws
                JOIN household_demographics hd ON (hd.hd_demo_sk = ws.ws_ship_hdemo_sk and hd.hd_dep_count = 5)
                JOIN web_page wp ON (wp.wp_web_page_sk = ws.ws_web_page_sk and wp.wp_char_count BETWEEN 5000 AND 6000)
                JOIN time_dim td ON (td.t_time_sk = ws.ws_sold_time_sk and td.t_hour IN (7,8,19,20))
                GROUP BY t_hour
            ) cnt_am_pm
        ) sum_am_pm
    """
    result = bc.sql(query)
    return result
# Script entry point: parse CLI args, attach to the dask cluster with a
# BlazingSQL context, and execute the query through the benchmark harness.
if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q18/gpu_bdb_query_18_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
import numpy as np
import cupy as cp
from bdb_tools.text import create_sentences_from_reviews, create_words_from_sentences
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from dask.distributed import wait
# -------- Q18 -----------
q18_startDate = "2001-05-02"
# --+90days
q18_endDate = "2001-09-02"
EOL_CHAR = "è"
def create_found_reshaped_with_global_pos(found, targets):
    """Given the dataframe created by mapping find_targets_in_reviews,
    create a new dataframe in which the nonzero values in each row are exploded
    to get their own row. Each row will contain the word, its mapping in the column order,
    and the pr_review_sk for the review from which it came.
    Having these as two separate functions makes managing dask metadata easier.
    """
    import cudf
    # Map each target word to its column position in `found`.
    target_df = cudf.DataFrame({"word": targets}).reset_index(drop=False)
    target_df.columns = ["word_mapping", "word"]
    df_clean = found.drop(["pr_review_sk"], axis=1)
    # Coordinates of every nonzero cell, i.e. every (review, word) hit.
    row_idxs, col_idxs = df_clean.values.nonzero()
    found_reshaped = cudf.DataFrame(
        {"word_mapping": col_idxs, "pr_review_sk": found["pr_review_sk"].iloc[row_idxs]}
    )
    # Recover the word string from its column position.
    found_reshaped = found_reshaped.merge(target_df, on="word_mapping", how="inner")[
        ["word", "pr_review_sk"]
    ]
    return found_reshaped
def find_targets_in_reviews_helper(ddf, targets, str_col_name="pr_review_content"):
    """returns a N x K matrix, where N is the number of rows in ddf that
    contain one of the target words and K is the number of words in targets.
    If a target word is found in a review, the value in that row, column
    is non-zero.
    At the end, any row with non-zero values is returned.
    """
    import cudf
    from cudf._lib.strings import find_multiple
    lowered = ddf[str_col_name].str.lower()
    ## TODO: Do the replace/any in cupy land before going to cuDF
    # find_multiple yields, per review, the character offset of each target
    # word (-1 when absent); reshape into an (n_reviews, n_targets) frame.
    resdf = cudf.DataFrame(
        cp.asarray(
            find_multiple.find_multiple(lowered._column, targets._column)
        ).reshape(-1, len(targets))
    )
    # Offset 0 (match at string start) -> 1, -1 (no match) -> 0; positive
    # offsets stay nonzero, so "nonzero" now means "target present".
    resdf = resdf.replace([0, -1], [1, 0])
    found_mask = resdf.any(axis=1)
    resdf["pr_review_sk"] = ddf["pr_review_sk"]
    found = resdf.loc[found_mask]
    return create_found_reshaped_with_global_pos(found, targets)
def find_relevant_reviews(df, targets, str_col_name="pr_review_content"):
    """Return reviews whose content mentions any of the target store names.

    The result pairs each matched store word with the review key and the
    review date.
    """
    import cudf
    lowered_targets = cudf.Series(targets).str.lower()
    matched = find_targets_in_reviews_helper(df, lowered_targets)
    matched = matched[["word", "pr_review_sk"]]
    return matched.merge(
        df[["pr_review_date", "pr_review_sk"]], how="inner", on=["pr_review_sk"]
    )
def read_tables(data_dir, bc):
    """Register the four q18 parquet input tables with the SQL context."""
    table_names = ("store", "store_sales", "date_dim", "product_reviews")
    for name in table_names:
        bc.create_table(name, os.path.join(data_dir, name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 18 via BlazingSQL.

    Finds stores with flat or declining sales (linear-regression slope
    <= 0 over the date window), locates product reviews mentioning those
    stores, and extracts the negative-sentiment sentences from them.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])
    # Stores whose least-squares sales slope over the window is <= 0.
    query_1 = f"""
        WITH temp_table1 AS
        (
            SELECT CAST(s.s_store_sk AS INTEGER) AS s_store_sk,
                s.s_store_name ,
                CAST(s.s_store_sk AS VARCHAR) || '_' || s.s_store_name
                    AS store_ID
            FROM store s,
            (
                SELECT temp.ss_store_sk,
                    ((count(temp.x) * SUM(temp.xy) - SUM(temp.x) * SUM(temp.y))
                        / (count(temp.x) * SUM(temp.xx) - SUM(temp.x) * SUM(temp.x))
                    ) AS slope
                FROM
                (
                    SELECT
                        s.ss_store_sk,
                        s.ss_sold_date_sk AS x,
                        CAST( SUM(s.ss_net_paid) AS DOUBLE) AS y,
                        s.ss_sold_date_sk * SUM(s.ss_net_paid) AS xy,
                        s.ss_sold_date_sk * s.ss_sold_date_sk AS xx
                    FROM store_sales s
                    WHERE EXISTS
                    (
                        SELECT * -- d_date_sk
                        FROM date_dim d
                        WHERE s.ss_sold_date_sk = d.d_date_sk
                        AND CAST(d.d_date AS DATE) >= DATE '{q18_startDate}'
                        AND CAST(d.d_date AS DATE) <= DATE '{q18_endDate}'
                    )
                    GROUP BY s.ss_store_sk, s.ss_sold_date_sk
                ) temp
                GROUP BY temp.ss_store_sk
            ) regression_analysis
            WHERE slope <= 0 --flat or declining sales
            AND s.s_store_sk = regression_analysis.ss_store_sk
        )
        SELECT * FROM temp_table1
    """
    stores_with_regression = bc.sql(query_1)
    # All reviews with non-null content.
    query_2 = """
        SELECT pr_review_date,
            pr_review_content,
            CAST(pr_review_sk AS INTEGER) AS pr_review_sk
        FROM product_reviews
        WHERE pr_review_content IS NOT NULL
        ORDER BY pr_review_date, pr_review_content, pr_review_sk
    """
    no_nulls = bc.sql(query_2)
    # Lower-cased store names to search for inside review text.
    targets = (
        stores_with_regression.s_store_name.str.lower()
        .unique()
        .compute()
        .to_arrow()
        .to_pylist()
    )
    # persisting because no_nulls is used twice
    no_nulls = no_nulls.persist()
    import cudf
    # Empty frame defining the partition schema for map_partitions below.
    temp_table2_meta_empty_df = cudf.DataFrame(
        {
            "word": ["a"],
            "pr_review_sk": np.ones(1, dtype=np.int64),
            "pr_review_date": ["a"],
        }
    ).head(0)
    # get relevant reviews
    combined = no_nulls.map_partitions(
        find_relevant_reviews, targets, meta=temp_table2_meta_empty_df,
    )
    # Normalize sentence terminators to a single EOL character for the
    # sentence tokenizer.
    no_nulls["pr_review_content"] = no_nulls.pr_review_content.str.replace(
        [". ", "? ", "! "], [EOL_CHAR], regex=False
    )
    stores_with_regression["store_ID"] = stores_with_regression.s_store_sk.astype(
        "str"
    ).str.cat(stores_with_regression.s_store_name, sep="_")
    stores_with_regression[
        "s_store_name"
    ] = stores_with_regression.s_store_name.str.lower()
    stores_with_regression = stores_with_regression.persist()
    wait(stores_with_regression)
    bc.create_table("stores_with_regression", stores_with_regression)
    combined = combined.persist()
    wait(combined)
    bc.create_table("combined", combined)
    # Join matched review words back to store identifiers.
    query_3 = """
        SELECT store_ID,
            pr_review_date,
            CAST(pr_review_sk AS INTEGER) AS pr_review_sk
        FROM stores_with_regression
        INNER JOIN combined ON s_store_name = word
    """
    temp_table2 = bc.sql(query_3)
    bc.drop_table("stores_with_regression")
    del stores_with_regression
    bc.drop_table("combined")
    del combined
    # REAL QUERY
    sentences = no_nulls.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # This txt file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    # Need to pass the absolute path for this txt file
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    bc.create_table("sent_df",
                    os.path.join(sentiment_dir, "negativeSentiment.txt"),
                    names=["sentiment_word"],
                    dtype=["str"],
                    file_format="csv")
    word_df = word_df.persist()
    wait(word_df)
    bc.create_table("word_df", word_df)
    sentences = sentences.persist()
    wait(sentences)
    bc.create_table("sentences", sentences)
    temp_table2 = temp_table2.persist()
    wait(temp_table2)
    bc.create_table("temp_table2", temp_table2)
    # Tag words found in the negative-sentiment lexicon, recover their
    # sentences, and join back to the flagged stores' reviews.
    query_4 = """
        WITH sentences_table AS
        (
            select sentence,
                review_idx_global_pos,
                CAST(sentence_tokenized_global_pos AS BIGINT) AS
                    sentence_tokenized_global_pos
            from sentences
        ), negativeSentiment AS
        (
            SELECT DISTINCT sentiment_word AS word
            FROM sent_df
        ), word_sentence_sentiment AS
        (
            SELECT n.word,
                CAST(wd.sentence_idx_global_pos AS BIGINT) AS
                    sentence_idx_global_pos,
                'NEG' AS sentiment
            FROM word_df wd
            INNER JOIN negativeSentiment n ON wd.word = n.word
        ), word_sentence_sentiment_with_sentence_info AS
        (
            SELECT * FROM word_sentence_sentiment
            LEFT JOIN sentences_table
            ON sentence_idx_global_pos = sentence_tokenized_global_pos
        )
        SELECT tt2.store_ID AS s_name,
            tt2.pr_review_date AS r_date,
            wsswsi.sentence AS r_sentence,
            wsswsi.sentiment AS sentiment,
            wsswsi.word AS sentiment_word
        FROM word_sentence_sentiment_with_sentence_info wsswsi
        INNER JOIN temp_table2 tt2
        ON wsswsi.review_idx_global_pos = tt2.pr_review_sk
        ORDER BY s_name, r_date, r_sentence, sentiment_word
    """
    result = bc.sql(query_4)
    bc.drop_table("word_df")
    del word_df
    bc.drop_table("sentences")
    del sentences
    bc.drop_table("temp_table2")
    del temp_table2
    return result
# Script entry point: parse CLI args, attach to the dask cluster with a
# BlazingSQL context, and execute the query through the benchmark harness.
if __name__ == "__main__":
    config = gpubdb_argparser()
    client, bc = attach_to_cluster(config, create_blazing_context=True)
    run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q18/gpu_bdb_query_18_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import os
from bdb_tools.cluster_startup import attach_to_cluster
import numpy as np
import dask_cudf
import dask.dataframe as dd
from bdb_tools.text import create_sentences_from_reviews, create_words_from_sentences
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q18_utils import (
find_relevant_reviews,
q18_startDate,
q18_endDate,
EOL_CHAR,
read_tables
)
from dask.distributed import wait
def main(data_dir, client, c, config):
    """Run gpu-bdb query 18 via dask-sql (GPU or CPU dataframes).

    Finds stores with flat or declining sales (linear-regression slope
    <= 0 over the date window), locates product reviews mentioning those
    stores, and extracts the negative-sentiment sentences from them.
    Branches on dask_cudf vs. pandas-backed dataframes where the APIs
    differ.
    """
    benchmark(read_tables, config, c)
    # Stores whose least-squares sales slope over the window is <= 0.
    query_1 = f"""
        WITH temp_table1 AS
        (
            SELECT CAST(s.s_store_sk AS INTEGER) AS s_store_sk,
                s.s_store_name ,
                CAST(s.s_store_sk AS VARCHAR) || '_' || s.s_store_name
                    AS store_ID
            FROM store s,
            (
                SELECT temp.ss_store_sk,
                    ((count(temp.x) * SUM(temp.xy) - SUM(temp.x) * SUM(temp.y))
                        / (count(temp.x) * SUM(temp.xx) - SUM(temp.x) * SUM(temp.x))
                    ) AS slope
                FROM
                (
                    SELECT
                        s.ss_store_sk,
                        s.ss_sold_date_sk AS x,
                        CAST( SUM(s.ss_net_paid) AS DOUBLE) AS y,
                        s.ss_sold_date_sk * SUM(s.ss_net_paid) AS xy,
                        s.ss_sold_date_sk * s.ss_sold_date_sk AS xx
                    FROM store_sales s
                    WHERE EXISTS
                    (
                        SELECT * -- d_date_sk
                        FROM date_dim d
                        WHERE s.ss_sold_date_sk = d.d_date_sk
                        AND CAST(d.d_date AS DATE) >= DATE '{q18_startDate}'
                        AND CAST(d.d_date AS DATE) <= DATE '{q18_endDate}'
                    )
                    GROUP BY s.ss_store_sk, s.ss_sold_date_sk
                ) temp
                GROUP BY temp.ss_store_sk
            ) regression_analysis
            WHERE slope <= 0 --flat or declining sales
            AND s.s_store_sk = regression_analysis.ss_store_sk
        )
        SELECT * FROM temp_table1
    """
    stores_with_regression = c.sql(query_1)
    # All reviews with non-null content.
    query_2 = """
        SELECT pr_review_date,
            pr_review_content,
            CAST(pr_review_sk AS INTEGER) AS pr_review_sk
        FROM product_reviews
        WHERE pr_review_content IS NOT NULL
        ORDER BY pr_review_date, pr_review_content, pr_review_sk
    """
    no_nulls = c.sql(query_2)
    # Lower-cased store names to search for inside review text; conversion
    # to a Python list differs between cudf and pandas backends.
    targets = (
        stores_with_regression.s_store_name.str.lower()
        .unique()
        .compute()
    )
    if isinstance(no_nulls, dask_cudf.DataFrame):
        targets = targets.to_arrow().to_pylist()
    else:
        targets = targets.tolist()
    # persisting because no_nulls is used twice
    no_nulls = no_nulls.persist()
    import cudf
    import pandas as pd
    # Empty frame defining the partition schema for map_partitions below,
    # built with the backend matching no_nulls.
    if isinstance(no_nulls, dask_cudf.DataFrame):
        temp_table2_meta_empty_df = cudf.DataFrame(
            {
                "word": ["a"],
                "pr_review_sk": np.ones(1, dtype=np.int64),
                "pr_review_date": ["a"],
            }
        ).head(0)
    else:
        temp_table2_meta_empty_df = pd.DataFrame(
            {
                "word": ["a"],
                "pr_review_sk": np.ones(1, dtype=np.int64),
                "pr_review_date": ["a"],
            }
        ).head(0)
    # get relevant reviews
    combined = no_nulls.map_partitions(
        find_relevant_reviews, targets, meta=temp_table2_meta_empty_df,
    )
    # Normalize sentence terminators to a single EOL character for the
    # sentence tokenizer.
    for char in [". ", "? ", "! "]:
        no_nulls["pr_review_content"] = no_nulls.pr_review_content.str.replace(char, EOL_CHAR, regex=False)
    stores_with_regression["store_ID"] = stores_with_regression.s_store_sk.astype(
        "str"
    ).str.cat(stores_with_regression.s_store_name, sep="_")
    stores_with_regression[
        "s_store_name"
    ] = stores_with_regression.s_store_name.str.lower()
    stores_with_regression = stores_with_regression.persist()
    wait(stores_with_regression)
    c.create_table("stores_with_regression", stores_with_regression, persist=False)
    combined = combined.persist()
    wait(combined)
    c.create_table("combined", combined, persist=False)
    # Join matched review words back to store identifiers.
    query_3 = """
        SELECT store_ID,
            pr_review_date,
            CAST(pr_review_sk AS INTEGER) AS pr_review_sk
        FROM stores_with_regression
        INNER JOIN combined ON s_store_name = word
    """
    temp_table2 = c.sql(query_3)
    temp_table2["pr_review_sk"] = temp_table2["pr_review_sk"].astype("int32")
    c.drop_table("stores_with_regression")
    del stores_with_regression
    c.drop_table("combined")
    del combined
    # REAL QUERY
    sentences = no_nulls.map_partitions(create_sentences_from_reviews)
    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]
    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    # This txt file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    # Need to pass the absolute path for this txt file
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    if isinstance(word_df, dask_cudf.DataFrame):
        ns_df = dask_cudf.read_csv(os.path.join(sentiment_dir, "negativeSentiment.txt"), names=["sentiment_word"])
    else:
        ns_df = dd.read_csv(os.path.join(sentiment_dir, "negativeSentiment.txt"), names=["sentiment_word"])
    c.create_table('sent_df', ns_df, persist=False)
    word_df = word_df.persist()
    wait(word_df)
    c.create_table("word_df", word_df, persist=False)
    sentences = sentences.persist()
    wait(sentences)
    c.create_table("sentences", sentences, persist=False)
    temp_table2 = temp_table2.persist()
    wait(temp_table2)
    c.create_table("temp_table2", temp_table2, persist=False)
    # Tag words found in the negative-sentiment lexicon, recover their
    # sentences, and join back to the flagged stores' reviews.
    query_4 = """
        WITH sentences_table AS
        (
            select sentence,
                review_idx_global_pos,
                CAST(sentence_tokenized_global_pos AS BIGINT) AS
                    sentence_tokenized_global_pos
            from sentences
        ), negativeSentiment AS
        (
            SELECT DISTINCT sentiment_word AS word
            FROM sent_df
        ), word_sentence_sentiment AS
        (
            SELECT n.word,
                CAST(wd.sentence_idx_global_pos AS BIGINT) AS
                    sentence_idx_global_pos,
                'NEG' AS sentiment
            FROM word_df wd
            INNER JOIN negativeSentiment n ON wd.word = n.word
        ), word_sentence_sentiment_with_sentence_info AS
        (
            SELECT * FROM word_sentence_sentiment
            LEFT JOIN sentences_table
            ON sentence_idx_global_pos = sentence_tokenized_global_pos
        )
        SELECT tt2.store_ID AS s_name,
            tt2.pr_review_date AS r_date,
            wsswsi.sentence AS r_sentence,
            wsswsi.sentiment AS sentiment,
            wsswsi.word AS sentiment_word
        FROM word_sentence_sentiment_with_sentence_info wsswsi
        INNER JOIN temp_table2 tt2
        ON wsswsi.review_idx_global_pos = tt2.pr_review_sk
        ORDER BY s_name, r_date, r_sentence, sentiment_word
    """
    result = c.sql(query_4)
    c.drop_table("word_df")
    del word_df
    c.drop_table("sentences")
    del sentences
    c.drop_table("temp_table2")
    del temp_table2
    return result
@annotate("QUERY18", color="green", domain="gpu-bdb")
def start_run():
    """Parse args, attach to the cluster with a dask-sql context, run q18."""
    config = gpubdb_argparser()
    client, c = attach_to_cluster(config, create_sql_context=True)
    run_query(config=config, client=client, query_func=main, sql_context=c)
# Script entry point.
if __name__ == "__main__":
    start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q18/README.md | # Query 18
In this query, we identify stores with flat or declining sales over 4 consecutive months, and then check whether any negative reviews about these stores are available online.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q18/gpu_bdb_query_18.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cudf
import dask_cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
left_semi_join,
run_query,
)
from bdb_tools.text import (
create_sentences_from_reviews,
create_words_from_sentences,
)
from bdb_tools.q18_utils import (
find_relevant_reviews,
q18_startDate,
q18_endDate,
EOL_CHAR,
read_tables
)
import numpy as np
from distributed import wait
TEMP_TABLE1 = "TEMP_TABLE1"
def main(client, config):
    """Run gpu-bdb query 18 on the Dask/cuDF cluster.

    Pipeline:
      1. Fit a least-squares slope of daily store revenue (``ss_net_paid``
         summed per store per day) over [q18_startDate, q18_endDate] and keep
         stores with slope <= 0 (flat or declining sales).
      2. Scan non-null product reviews for mentions of those store names
         (``find_relevant_reviews``).
      3. Split the matching reviews into sentences, tokenize into words, and
         inner-join the words against the TPCx-BB negative-sentiment list.

    Returns a dask_cudf DataFrame with columns
    [s_name, r_date, r_sentence, sentiment, sentiment_word], sorted by those
    columns (minus ``sentiment``, which is constant "NEG").
    """
    store_sales, date_dim, store, product_reviews = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )
    ### adding a wait call slows this down by 3-4 seconds, removing it for now
    ### Make TEMP_TABLE1

    # Filter date_dim to the analysis window, comparing as integer epoch-ms.
    q18_startDate_int = np.datetime64(q18_startDate, "ms").astype(int)
    q18_endDate_int = np.datetime64(q18_endDate, "ms").astype(int)

    date_dim_filtered = date_dim.loc[
        (date_dim.d_date.astype("datetime64[ms]").astype("int") >= q18_startDate_int)
        & (date_dim.d_date.astype("datetime64[ms]").astype("int") <= q18_endDate_int)
    ].reset_index(drop=True)

    # build the regression_analysis table: restrict store_sales to the window
    ss_date_dim_join = left_semi_join(
        store_sales,
        date_dim_filtered,
        left_on=["ss_sold_date_sk"],
        right_on=["d_date_sk"],
    )

    # Per-store, per-day revenue; then the x/y/xx/xy terms of the regression,
    # where x = day (ss_sold_date_sk) and y = daily revenue (ss_net_paid sum).
    temp = (
        ss_date_dim_join.groupby(["ss_store_sk", "ss_sold_date_sk"],)
        .agg({"ss_net_paid": "sum"})
        .reset_index()
    )
    temp["xx"] = temp.ss_sold_date_sk * temp.ss_sold_date_sk
    temp["xy"] = temp.ss_sold_date_sk * temp.ss_net_paid

    temp.columns = ["ss_store_sk", "x", "y", "xx", "xy"]

    regression_analysis = (
        temp.groupby(["ss_store_sk"])
        .agg({"x": ["count", "sum"], "xy": "sum", "y": "sum", "xx": "sum"})
        .reset_index(drop=False)
    )

    # Least-squares slope: (n*Σxy - Σx*Σy) / (n*Σxx - (Σx)^2)
    regression_analysis["slope"] = (
        regression_analysis[("x", "count")] * regression_analysis[("xy", "sum")]
        - regression_analysis[("x", "sum")] * regression_analysis[("y", "sum")]
    ) / (
        regression_analysis[("x", "count")] * regression_analysis[("xx", "sum")]
        - regression_analysis[("x", "sum")] * regression_analysis[("x", "sum")]
    )
    regression_analysis = regression_analysis[["ss_store_sk", "slope"]]
    regression_analysis.columns = ["ss_store_sk", "slope"]
    regression_analysis["ss_store_sk"] = regression_analysis["ss_store_sk"].astype(
        "int32"
    )
    store["s_store_sk"] = store["s_store_sk"].astype("int32")

    # Keep only stores with flat/declining sales (slope <= 0).
    temp_table1 = store.merge(
        regression_analysis[["ss_store_sk", "slope"]]
        .query("slope <= 0")
        .reset_index(drop=True),
        left_on="s_store_sk",
        right_on="ss_store_sk",
    )
    temp_table1 = temp_table1[["s_store_sk", "s_store_name"]]

    # repartition this table to be one partition, since it's only 192 at SF1000
    temp_table1 = temp_table1.repartition(npartitions=1)
    temp_table1 = temp_table1.persist()
    ### Make TEMP_TABLE2
    stores_with_regression = temp_table1
    pr = product_reviews

    # known to be small. very few relevant stores (169) at SF1000
    # Materialize the lowercase store names as a host-side Python list so they
    # can be broadcast to every partition of the review scan below.
    targets = (
        stores_with_regression.s_store_name.str.lower()
        .unique()
        .compute()
        .to_arrow()
        .to_pylist()
    )

    no_nulls = pr[~pr.pr_review_content.isnull()].reset_index(drop=True)
    no_nulls["pr_review_sk"] = no_nulls["pr_review_sk"].astype("int32")

    ### persisting because no_nulls is used twice
    no_nulls = no_nulls.reset_index(drop=True).persist()

    # Empty frame used only as the `meta` (schema) for map_partitions.
    temp_table2_meta_empty_df = cudf.DataFrame(
        {
            "word": ["a"],
            "pr_review_sk": np.ones(1, dtype=np.int64),
            "pr_review_date": ["a"],
        }
    ).head(0)

    ### get relevant reviews
    combined = no_nulls.map_partitions(
        find_relevant_reviews, targets, meta=temp_table2_meta_empty_df,
    )

    # store_ID = "<sk>_<name>", e.g. "42_ation"; used in the final output.
    stores_with_regression["store_ID"] = stores_with_regression.s_store_sk.astype(
        "str"
    ).str.cat(stores_with_regression.s_store_name, sep="_")
    stores_with_regression[
        "s_store_name"
    ] = stores_with_regression.s_store_name.str.lower()

    # Keep this commented line to illustrate that we could exactly match Spark
    # temp_table2 = temp_table2[['store_ID', 'pr_review_date', 'pr_review_content']]
    temp_table2 = combined.merge(
        stores_with_regression, how="inner", left_on=["word"], right_on=["s_store_name"]
    )
    temp_table2 = temp_table2[["store_ID", "pr_review_date", "pr_review_sk"]]
    temp_table2 = temp_table2.persist()

    ### REAL QUERY (PART THREE)
    # Normalize sentence terminators to a single EOL character so the
    # sentence tokenizer has one delimiter to split on.
    no_nulls["pr_review_content"] = no_nulls.pr_review_content.str.replace(
        [". ", "? ", "! "], [EOL_CHAR], regex=False
    )
    sentences = no_nulls.map_partitions(create_sentences_from_reviews)

    # need the global position in the sentence tokenized df
    sentences["x"] = 1
    sentences["sentence_tokenized_global_pos"] = sentences.x.cumsum()
    del sentences["x"]

    # This file comes from the official TPCx-BB kit
    # We extracted it from bigbenchqueriesmr.jar
    sentiment_dir = os.path.join(config["data_dir"], "sentiment_files")
    with open(os.path.join(sentiment_dir, "negativeSentiment.txt")) as fh:
        negativeSentiment = list(map(str.strip, fh.readlines()))
        # dedupe for one extra record in the source file
        negativeSentiment = list(set(negativeSentiment))

    word_df = sentences.map_partitions(
        create_words_from_sentences,
        global_position_column="sentence_tokenized_global_pos",
    )
    sent_df = cudf.DataFrame({"word": negativeSentiment})
    sent_df["sentiment"] = "NEG"
    sent_df = dask_cudf.from_cudf(sent_df, npartitions=1)

    # Keep only words that appear in the negative-sentiment vocabulary.
    word_sentence_sentiment = word_df.merge(sent_df, how="inner", on="word")

    # Align join-key dtypes before the merge back to sentences.
    word_sentence_sentiment["sentence_idx_global_pos"] = word_sentence_sentiment[
        "sentence_idx_global_pos"
    ].astype("int64")
    sentences["sentence_tokenized_global_pos"] = sentences[
        "sentence_tokenized_global_pos"
    ].astype("int64")

    word_sentence_sentiment_with_sentence_info = word_sentence_sentiment.merge(
        sentences,
        how="left",
        left_on="sentence_idx_global_pos",
        right_on="sentence_tokenized_global_pos",
    )
    temp_table2["pr_review_sk"] = temp_table2["pr_review_sk"].astype("int32")

    # Attach the store/review info; inner join drops sentiment words from
    # reviews that did not mention a declining store.
    final = word_sentence_sentiment_with_sentence_info.merge(
        temp_table2[["store_ID", "pr_review_date", "pr_review_sk"]],
        how="inner",
        left_on="review_idx_global_pos",
        right_on="pr_review_sk",
    )

    keepcols = ["store_ID", "pr_review_date", "sentence", "sentiment", "word"]
    final = final[keepcols]
    final.columns = ["s_name", "r_date", "r_sentence", "sentiment", "sentiment_word"]
    final = final.persist()
    wait(final)
    final = final.sort_values(["s_name", "r_date", "r_sentence", "sentiment_word"])
    final = final.persist()
    wait(final)
    return final
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q06/gpu_bdb_query_06.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q06_utils import (
q06_YEAR,
q06_LIMIT,
read_tables
)
def get_sales_ratio(df, table="store_sales", year=None):
    """Add ``first_year_sales`` / ``second_year_sales`` columns to a partition.

    Each row's sales figure is the TPCx-BB q06 profit proxy
    ``((ext_list_price - ext_wholesale_cost - ext_discount_amt)
    + ext_sales_price) / 2``, attributed to the first or the second year of
    the analysis window according to ``d_year``; rows outside both years get
    0.0 in both columns.

    Parameters
    ----------
    df : DataFrame partition containing ``d_year`` and the four
        ``{prefix}ext_*`` columns.
    table : "store_sales" or "web_sales"; selects the ``ss_``/``ws_``
        column prefix.
    year : optional first year of the window; defaults to the configured
        ``q06_YEAR`` (kept as a keyword default for backward compatibility).

    Returns
    -------
    The same DataFrame with the two new columns added (mutated in place).
    """
    assert table in ("store_sales", "web_sales")

    if table == "store_sales":
        column_prefix = "ss_"
    else:
        column_prefix = "ws_"

    f_year = q06_YEAR if year is None else year
    s_year = f_year + 1

    first_year_flag = df["d_year"] == f_year
    second_year_flag = df["d_year"] == s_year

    # Profit proxy, computed once for every row.
    sales = (
        (
            df[f"{column_prefix}ext_list_price"]
            - df[f"{column_prefix}ext_wholesale_cost"]
            - df[f"{column_prefix}ext_discount_amt"]
        )
        + df[f"{column_prefix}ext_sales_price"]
    ) / 2

    # Use vectorized masking instead of chained indexing
    # (df[col][mask] = ...), which is unreliable (SettingWithCopy) and
    # deprecated in modern pandas-like APIs.
    df["first_year_sales"] = sales.where(first_year_flag, 0.00)
    df["second_year_sales"] = sales.where(second_year_flag, 0.00)
    return df
def main(client, config):
    """Run gpu-bdb query 6 as a Dask/cuDF dataframe pipeline.

    Computes, per customer, first- vs second-year totals for web and store
    sales over [q06_YEAR, q06_YEAR + 1], keeps customers with positive
    first-year totals in both channels, joins customer attributes, and
    returns the top ``q06_LIMIT`` rows ordered (descending) by the web-sales
    growth ratio and the tie-break columns.

    NOTE(review): unlike the SQL variant of this query, no filter comparing
    the web growth ratio against the store growth ratio is applied here —
    confirm this difference is intended.
    """
    ws_df, ss_df, date_df, customer_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # Restrict date_dim to the two-year window once; reused by both joins.
    filtered_date_df = date_df.query(
        f"d_year >= {q06_YEAR} and d_year <= {q06_YEAR+1}", meta=date_df._meta
    ).reset_index(drop=True)

    web_sales_df = ws_df.merge(
        filtered_date_df, left_on="ws_sold_date_sk", right_on="d_date_sk", how="inner"
    )

    # Per-customer, per-year sums of the four price components.
    ws_grouped_df = (
        web_sales_df.groupby(by=["ws_bill_customer_sk", "d_year"])
        .agg(
            {
                "ws_ext_list_price": "sum",
                "ws_ext_wholesale_cost": "sum",
                "ws_ext_discount_amt": "sum",
                "ws_ext_sales_price": "sum",
            }
        )
        .reset_index()
    )
    # Split the per-year sums into first/second-year sales columns.
    web_sales_ratio_df = ws_grouped_df.map_partitions(
        get_sales_ratio, table="web_sales"
    )

    web_sales = (
        web_sales_ratio_df.groupby(["ws_bill_customer_sk"])
        .agg({"first_year_sales": "sum", "second_year_sales": "sum"})
        .reset_index()
    )
    # Drop customers with no first-year web sales (avoids divide-by-zero in
    # the ratio below).
    web_sales = web_sales.loc[web_sales["first_year_sales"] > 0].reset_index(drop=True)
    web_sales = web_sales.rename(
        columns={
            "first_year_sales": "first_year_total_web",
            "second_year_sales": "second_year_total_web",
        }
    )

    # Same aggregation for the store channel.
    store_sales_df = ss_df.merge(
        filtered_date_df, left_on="ss_sold_date_sk", right_on="d_date_sk", how="inner"
    )

    ss_grouped_df = (
        store_sales_df.groupby(by=["ss_customer_sk", "d_year"])
        .agg(
            {
                "ss_ext_list_price": "sum",
                "ss_ext_wholesale_cost": "sum",
                "ss_ext_discount_amt": "sum",
                "ss_ext_sales_price": "sum",
            }
        )
        .reset_index()
    )
    store_sales_ratio_df = ss_grouped_df.map_partitions(
        get_sales_ratio, table="store_sales"
    )

    store_sales = (
        store_sales_ratio_df.groupby(["ss_customer_sk"])
        .agg({"first_year_sales": "sum", "second_year_sales": "sum"})
        .reset_index()
    )
    store_sales = store_sales.loc[store_sales["first_year_sales"] > 0].reset_index(
        drop=True
    )
    store_sales = store_sales.rename(
        columns={
            "first_year_sales": "first_year_total_store",
            "second_year_sales": "second_year_total_store",
        }
    )

    # SQL "AS": inner join keeps customers active in both channels.
    sales_df = web_sales.merge(
        store_sales,
        left_on="ws_bill_customer_sk",
        right_on="ss_customer_sk",
        how="inner",
    )
    sales_df["web_sales_increase_ratio"] = (
        sales_df["second_year_total_web"] / sales_df["first_year_total_web"]
    )

    # Join the customer with the combined web and store sales.
    customer_df["c_customer_sk"] = customer_df["c_customer_sk"].astype("int64")
    sales_df["ws_bill_customer_sk"] = sales_df["ws_bill_customer_sk"].astype("int64")
    sales_df = sales_df.merge(
        customer_df,
        left_on="ws_bill_customer_sk",
        right_on="c_customer_sk",
        how="inner",
    ).reset_index(drop=True)

    keep_cols = [
        "ws_bill_customer_sk",
        "web_sales_increase_ratio",
        "c_email_address",
        "c_first_name",
        "c_last_name",
        "c_preferred_cust_flag",
        "c_birth_country",
        "c_login",
    ]
    sales_df = sales_df[keep_cols]
    sales_df = sales_df.rename(columns={"ws_bill_customer_sk": "c_customer_sk"})

    # sales_df is 514,291 rows at SF-100 and 3,031,718 at SF-1000
    # We cant sort descending in Dask right now, anyway
    # Collapse to one partition so the per-partition sort is a global sort.
    sales_df = sales_df.repartition(npartitions=1).persist()
    result_df = sales_df.reset_index(drop=True)
    result_df = result_df.map_partitions(
        lambda df: df.sort_values(
            by=[
                "web_sales_increase_ratio",
                "c_customer_sk",
                "c_first_name",
                "c_last_name",
                "c_preferred_cust_flag",
                "c_birth_country",
                "c_login",
            ],
            ascending=False,
        )
    )
    return result_df.head(q06_LIMIT)
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q06/gpu_bdb_query_06_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# -------- Q6 -----------
q06_LIMIT = 100
# --web_sales and store_sales date
q06_YEAR = 2001
def read_tables(data_dir, bc):
    """Register the four q06 input tables (parquet globs under *data_dir*)
    with the BlazingSQL context *bc*."""
    for table_name in ("web_sales", "store_sales", "date_dim", "customer"):
        bc.create_table(table_name, os.path.join(data_dir, table_name + "/*.parquet"))
def main(data_dir, client, bc, config):
    """Run TPCx-BB query 6 as a single SQL statement on BlazingSQL.

    Finds the top ``q06_LIMIT`` customers whose year-over-year spend grew
    faster online (web_sales) than in stores (store_sales) between
    ``q06_YEAR`` and ``q06_YEAR + 1``.

    Parameters
    ----------
    data_dir : root directory containing the parquet tables.
    client : distributed.Client (unused in the body; part of the harness
        signature).
    bc : BlazingContext used to register tables and execute SQL.
    config : benchmark configuration dict (only ``"dask_profile"`` is read).
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # The per-year totals use the q06 profit proxy:
    # ((ext_list_price - ext_wholesale_cost - ext_discount_amt)
    #  + ext_sales_price) / 2
    query = f"""
        WITH temp_table_1 as
        (
            SELECT ss_customer_sk AS customer_sk,
                sum( case when (d_year = {q06_YEAR}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END)
                AS first_year_total,
                sum( case when (d_year = {q06_YEAR + 1}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END)
                AS second_year_total
            FROM store_sales,
                date_dim
            WHERE ss_sold_date_sk = d_date_sk
            AND d_year BETWEEN {q06_YEAR} AND {q06_YEAR + 1}
            GROUP BY ss_customer_sk
            -- first_year_total is an aggregation, rewrite all sum () statement
            HAVING sum( case when (d_year = {q06_YEAR}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END) > 0.0
        ),
        temp_table_2 AS
        (
            SELECT ws_bill_customer_sk AS customer_sk ,
                sum( case when (d_year = {q06_YEAR}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0) ELSE 0.0 END)
                AS first_year_total,
                sum( case when (d_year = {q06_YEAR + 1}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0) ELSE 0.0 END)
                AS second_year_total
            FROM web_sales,
                date_dim
            WHERE ws_sold_date_sk = d_date_sk
            AND d_year BETWEEN {q06_YEAR} AND {q06_YEAR + 1}
            GROUP BY ws_bill_customer_sk
            -- required to avoid division by 0, because later we will divide by this value
            HAVING sum( case when (d_year = {q06_YEAR}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0)ELSE 0.0 END) > 0.0
        )
        -- MAIN QUERY
        SELECT
            CAST( (web.second_year_total / web.first_year_total) AS DOUBLE) AS web_sales_increase_ratio,
            c_customer_sk,
            c_first_name,
            c_last_name,
            c_preferred_cust_flag,
            c_birth_country,
            c_login,
            c_email_address
        FROM temp_table_1 store,
            temp_table_2 web,
            customer c
        WHERE store.customer_sk = web.customer_sk
        AND web.customer_sk = c_customer_sk
        -- if customer has sales in first year for both store and websales,
        -- select him only if web second_year_total/first_year_total
        -- ratio is bigger then his store second_year_total/first_year_total ratio.
        AND (web.second_year_total / web.first_year_total) >
            (store.second_year_total / store.first_year_total)
        ORDER BY
            web_sales_increase_ratio DESC,
            c_customer_sk,
            c_first_name,
            c_last_name,
            c_preferred_cust_flag,
            c_birth_country,
            c_login
        LIMIT {q06_LIMIT}
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q06/gpu_bdb_query_06_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q06_utils import (
q06_LIMIT,
q06_YEAR,
read_tables
)
def main(data_dir, client, c, config):
    """Run TPCx-BB query 6 as a single SQL statement on dask-sql.

    Finds the top ``q06_LIMIT`` customers whose year-over-year spend grew
    faster online (web_sales) than in stores (store_sales) between
    ``q06_YEAR`` and ``q06_YEAR + 1``.

    Parameters
    ----------
    data_dir : unused here (tables are located via *config* in read_tables);
        part of the harness signature.
    client : distributed.Client (unused in the body).
    c : dask_sql.Context used to register tables and execute SQL.
    config : benchmark configuration dict.
    """
    benchmark(read_tables, config, c)

    # The per-year totals use the q06 profit proxy:
    # ((ext_list_price - ext_wholesale_cost - ext_discount_amt)
    #  + ext_sales_price) / 2
    query = f"""
        WITH temp_table_1 as
        (
            SELECT ss_customer_sk AS customer_sk,
                sum( case when (d_year = {q06_YEAR}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END)
                AS first_year_total,
                sum( case when (d_year = {q06_YEAR + 1}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END)
                AS second_year_total
            FROM store_sales,
                date_dim
            WHERE ss_sold_date_sk = d_date_sk
            AND d_year BETWEEN {q06_YEAR} AND {q06_YEAR + 1}
            GROUP BY ss_customer_sk
            -- first_year_total is an aggregation, rewrite all sum () statement
            HAVING sum( case when (d_year = {q06_YEAR}) THEN (((ss_ext_list_price-ss_ext_wholesale_cost-ss_ext_discount_amt)+ss_ext_sales_price)/2.0) ELSE 0.0 END) > 0.0
        ),
        temp_table_2 AS
        (
            SELECT ws_bill_customer_sk AS customer_sk ,
                sum( case when (d_year = {q06_YEAR}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0) ELSE 0.0 END)
                AS first_year_total,
                sum( case when (d_year = {q06_YEAR + 1}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0) ELSE 0.0 END)
                AS second_year_total
            FROM web_sales,
                date_dim
            WHERE ws_sold_date_sk = d_date_sk
            AND d_year BETWEEN {q06_YEAR} AND {q06_YEAR + 1}
            GROUP BY ws_bill_customer_sk
            -- required to avoid division by 0, because later we will divide by this value
            HAVING sum( case when (d_year = {q06_YEAR}) THEN (((ws_ext_list_price-ws_ext_wholesale_cost-ws_ext_discount_amt)+ws_ext_sales_price)/2.0)ELSE 0.0 END) > 0.0
        )
        -- MAIN QUERY
        SELECT
            CAST( (web.second_year_total / web.first_year_total) AS DOUBLE) AS web_sales_increase_ratio,
            c_customer_sk,
            c_first_name,
            c_last_name,
            c_preferred_cust_flag,
            c_birth_country,
            c_login,
            c_email_address
        FROM temp_table_1 store,
            temp_table_2 web,
            customer c
        WHERE store.customer_sk = web.customer_sk
        AND web.customer_sk = c_customer_sk
        -- if customer has sales in first year for both store and websales,
        -- select him only if web second_year_total/first_year_total
        -- ratio is bigger then his store second_year_total/first_year_total ratio.
        AND (web.second_year_total / web.first_year_total) >
            (store.second_year_total / store.first_year_total)
        ORDER BY
            web_sales_increase_ratio DESC,
            c_customer_sk,
            c_first_name,
            c_last_name,
            c_preferred_cust_flag,
            c_birth_country,
            c_login
        LIMIT {q06_LIMIT}
    """
    result = c.sql(query)
    return result
@annotate("QUERY6", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q02/gpu_bdb_query_02_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.sessionization import get_distinct_sessions
from dask.distributed import wait
# -------- Q2 -----------
q02_item_sk = 10001
q02_limit = 30
q02_session_timeout_inSec = 3600
def read_tables(data_dir, bc):
    """Register the q02 input table (web_clickstreams parquet glob under
    *data_dir*) with the BlazingSQL context *bc*."""
    clickstream_glob = os.path.join(data_dir, "web_clickstreams/*.parquet")
    bc.create_table("web_clickstreams", clickstream_glob)
def main(data_dir, client, bc, config):
    """Run gpu-bdb query 2 on BlazingSQL: for the configured item
    ``q02_item_sk``, find the top ``q02_limit`` items most frequently viewed
    in the same user session.

    Sessionization (splitting a user's clicks on ``q02_session_timeout_inSec``
    gaps) is done in cuDF via ``get_distinct_sessions``; the co-occurrence
    count is done in SQL on the sessionized table.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # Project/clean the clickstream and order by user so each user's clicks
    # are contiguous for the per-partition sessionizer.
    query_1 = """
        SELECT
            CAST(wcs_user_sk AS INTEGER) AS wcs_user_sk,
            CAST(wcs_item_sk AS INTEGER) AS wcs_item_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec
        FROM web_clickstreams
        WHERE wcs_item_sk IS NOT NULL
        AND wcs_user_sk IS NOT NULL
        ORDER BY wcs_user_sk
    """
    wcs_result = bc.sql(query_1)

    # Assign a session_id per (user, timeout-gap) window.
    session_df = wcs_result.map_partitions(
        get_distinct_sessions,
        keep_cols=["wcs_user_sk", "wcs_item_sk"],
        time_out=q02_session_timeout_inSec,
    )
    del wcs_result

    # Persist before re-registering so the SQL below reuses materialized data.
    session_df = session_df.persist()
    wait(session_df)
    bc.create_table('session_df', session_df)

    # Count items co-viewed in sessions containing q02_item_sk (excluding the
    # item itself).
    last_query = f"""
        WITH item_df AS (
            SELECT wcs_user_sk, session_id
            FROM session_df
            WHERE wcs_item_sk = {q02_item_sk}
        )
        SELECT sd.wcs_item_sk as item_sk_1,
            count(sd.wcs_item_sk) as cnt
        FROM session_df sd
        INNER JOIN item_df id
        ON sd.wcs_user_sk = id.wcs_user_sk
        AND sd.session_id = id.session_id
        AND sd.wcs_item_sk <> {q02_item_sk}
        GROUP BY sd.wcs_item_sk
        ORDER BY cnt desc
        LIMIT {q02_limit}
    """
    result = bc.sql(last_query)

    # Add the constant query-item column expected in the output schema.
    result["item_sk_2"] = q02_item_sk
    result_order = ["item_sk_1", "item_sk_2", "cnt"]
    result = result[result_order]

    del session_df
    bc.drop_table("session_df")
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q02/gpu_bdb_query_02_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.sessionization import get_distinct_sessions
from bdb_tools.q02_utils import (
q02_item_sk,
q02_limit,
q02_session_timeout_inSec,
read_tables
)
def main(data_dir, client, c, config):
    """Run gpu-bdb query 2 on dask-sql: for the configured item
    ``q02_item_sk``, find the top ``q02_limit`` items most frequently viewed
    in the same user session.

    Sessionization is done in cuDF via ``get_distinct_sessions``; the
    co-occurrence count is done in SQL on the sessionized table.
    """
    benchmark(read_tables, config, c)

    # Project/clean the clickstream; DISTRIBUTE BY keeps each user's clicks
    # in one partition for the per-partition sessionizer.
    query_1 = """
        SELECT
            CAST(wcs_user_sk AS INTEGER) AS wcs_user_sk,
            CAST(wcs_item_sk AS INTEGER) AS wcs_item_sk,
            (wcs_click_date_sk * 86400 + wcs_click_time_sk) AS tstamp_inSec
        FROM web_clickstreams
        WHERE wcs_item_sk IS NOT NULL
        AND wcs_user_sk IS NOT NULL
        DISTRIBUTE BY wcs_user_sk
    """
    wcs_result = c.sql(query_1)

    # Assign a session_id per (user, timeout-gap) window.
    session_df = wcs_result.map_partitions(
        get_distinct_sessions,
        keep_cols=["wcs_user_sk", "wcs_item_sk"],
        time_out=q02_session_timeout_inSec,
    )
    del wcs_result
    c.create_table('session_df', session_df, persist=False)

    # Count items co-viewed in sessions containing q02_item_sk (excluding the
    # item itself).
    last_query = f"""
        WITH item_df AS (
            SELECT wcs_user_sk, session_id
            FROM session_df
            WHERE wcs_item_sk = {q02_item_sk}
        )
        SELECT sd.wcs_item_sk as item_sk_1,
            count(sd.wcs_item_sk) as cnt
        FROM session_df sd
        INNER JOIN item_df id
        ON sd.wcs_user_sk = id.wcs_user_sk
        AND sd.session_id = id.session_id
        AND sd.wcs_item_sk <> {q02_item_sk}
        GROUP BY sd.wcs_item_sk
        ORDER BY cnt desc
        LIMIT {q02_limit}
    """
    result = c.sql(last_query)

    # Add the constant query-item column expected in the output schema.
    result["item_sk_2"] = q02_item_sk
    result_order = ["item_sk_1", "item_sk_2", "cnt"]
    result = result[result_order]

    del session_df
    c.drop_table("session_df")
    return result
@annotate("QUERY2", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q02/gpu_bdb_query_02.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.sessionization import get_distinct_sessions
from bdb_tools.q02_utils import (
q02_item_sk,
q02_limit,
q02_session_timeout_inSec,
read_tables
)
### Implementation Notes:
### Future Notes:
# The bottleneck of the current implementation is `set-index`; once UCX is
# working correctly it should go away
def get_relevant_item_series(df, q02_item_sk):
    """Return the series of items co-viewed with *q02_item_sk*.

    For every session (``wcs_user_sk`` + ``session_id``) that contains the
    query item, collect all other items clicked in that session. The result
    is the ``wcs_item_sk_t2`` column (one entry per co-occurrence, duplicates
    preserved) with a fresh RangeIndex.
    """
    # Rows where the session viewed the query item.
    anchor_rows = df[df["wcs_item_sk"] == q02_item_sk].reset_index(drop=True)
    # Pair each anchor row with every click from the same user session.
    paired = anchor_rows.merge(
        df, on=["wcs_user_sk", "session_id"], suffixes=["_t1", "_t2"], how="inner"
    )
    other_items = paired["wcs_item_sk_t2"]
    keep = other_items != q02_item_sk
    return other_items[keep].reset_index(drop=True)
def reduction_function(df, q02_session_timeout_inSec):
    """Per-partition reduce step: sessionize one partition of clicks, then
    count how often each item is co-viewed with the query item
    (module-level ``q02_item_sk``) within the same session.

    Returns a frame with columns ["i_item_sk", "cnt"].
    """
    session_df = get_distinct_sessions(
        df,
        keep_cols=["wcs_user_sk", "wcs_item_sk"],
        time_out=q02_session_timeout_inSec,
    )
    co_viewed = get_relevant_item_series(session_df, q02_item_sk)
    # Free the partition-sized intermediates as soon as possible.
    del session_df
    counts = co_viewed.value_counts().reset_index(drop=False)
    del co_viewed
    counts.columns = ["i_item_sk", "cnt"]
    return counts
def pre_repartition_task(wcs_df):
    """Filter and project one partition of web_clickstreams.

    Drops rows with a null item or user, derives the absolute click time in
    seconds (``wcs_click_date_sk`` days plus ``wcs_click_time_sk`` seconds),
    and keeps only the three columns needed downstream, with the key columns
    compacted to int32 ahead of the user-keyed shuffle.
    """
    valid = wcs_df["wcs_item_sk"].notnull() & wcs_df["wcs_user_sk"].notnull()
    clicks = wcs_df[valid].reset_index(drop=True)
    # Absolute timestamp in seconds since day 0.
    clicks["tstamp_inSec"] = (
        clicks["wcs_click_date_sk"] * 24 * 60 * 60 + clicks["wcs_click_time_sk"]
    )
    clicks = clicks[["wcs_user_sk", "wcs_item_sk", "tstamp_inSec"]]
    ### for map reduce task, we set index to ensure
    ### that the click for each user ends up at the same partition
    for key_col in ("wcs_user_sk", "wcs_item_sk"):
        clicks[key_col] = clicks[key_col].astype("int32")
    return clicks
def main(client, config):
    """Run gpu-bdb query 2 as a Dask/cuDF dataframe pipeline.

    For the configured ``q02_item_sk``, count the items most frequently
    viewed in the same user session and return the top ``q02_limit`` of them
    as a frame with columns [item_sk_1, item_sk_2, cnt] (item_sk_2 is the
    constant query item).
    """
    wcs_df = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    ### filter nulls
    # SELECT
    # wcs_user_sk,
    # wcs_item_sk,
    # (wcs_click_date_sk * 24 * 60 * 60 + wcs_click_time_sk) AS tstamp_inSec
    # FROM web_clickstreams
    # WHERE wcs_item_sk IS NOT NULL
    # AND wcs_user_sk IS NOT NULL
    f_wcs_df = wcs_df.map_partitions(pre_repartition_task)
    # Shuffle so that all clicks of a given user land in one partition; the
    # per-partition sessionize/count in reduction_function relies on this.
    f_wcs_df = f_wcs_df.shuffle(on=["wcs_user_sk"])

    ### Main Query
    # SELECT
    # item_sk_1,${hiveconf:q02_item_sk} AS item_sk_2, COUNT (*) AS cnt
    # FROM
    # (
    # )
    # GROUP BY item_sk_1
    # ORDER BY
    # cnt DESC,
    # item_sk_1
    # LIMIT ${hiveconf:q02_limit};
    # q02_limit=30
    grouped_df = f_wcs_df.map_partitions(reduction_function, q02_session_timeout_inSec)

    # Combine the per-partition counts into global counts per item.
    items_value_counts = grouped_df.groupby(["i_item_sk"]).cnt.sum()
    items_value_counts = items_value_counts.map_partitions(
        lambda ser: ser.sort_values(ascending=False)
    )

    ### final calculation on 30 values
    result_df = items_value_counts.reset_index(drop=False)
    result_df.columns = ["item_sk_1", "cnt"]
    result_df = result_df.head(q02_limit)
    result_df["item_sk_2"] = q02_item_sk
    result_order = ["item_sk_1", "item_sk_2", "cnt"]
    result_df = result_df[result_order]
    return result_df
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q17/gpu_bdb_query_17_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q17_utils import (
q17_gmt_offset,
q17_year,
q17_month,
read_tables
)
q17_i_category_IN = "'Books', 'Music'"
def main(data_dir, client, c, config):
    """Run gpu-bdb query 17 on dask-sql: the ratio of promotional to total
    store sales for the configured categories, month, and GMT offset.

    A small preliminary query resolves the (min, max) ``d_date_sk`` range for
    ``q17_year``/``q17_month`` on the client, so the main query can filter
    ``ss_sold_date_sk`` with plain comparisons instead of a date join.
    """
    benchmark(read_tables, config, c)

    query_date = f"""
        select min(d_date_sk) as min_d_date_sk,
            max(d_date_sk) as max_d_date_sk
        from date_dim
        where d_year = {q17_year}
        and d_moy = {q17_month}
    """
    # .compute() pulls the two scalars to the client for interpolation below.
    dates_result = c.sql(query_date).compute()
    min_date_sk_val = dates_result["min_d_date_sk"][0]
    max_date_sk_val = dates_result["max_d_date_sk"][0]

    # Inner query: per promotion-channel combination, total sales and the
    # portion attributable to any of the dmail/email/tv channels; outer query
    # collapses those to a single promotional-percentage row.
    query = f"""
        SELECT sum(promotional) as promotional,
            sum(total) as total,
            CASE WHEN sum(total) > 0.0 THEN (100.0 * sum(promotional)) / sum(total)
                ELSE 0.0 END as promo_percent
        FROM
        (
            SELECT p_channel_email,
                p_channel_dmail,
                p_channel_tv,
                SUM( CAST(ss_ext_sales_price AS DOUBLE) ) total,
                CASE WHEN (p_channel_dmail = 'Y' OR p_channel_email = 'Y' OR p_channel_tv = 'Y')
                    THEN SUM(CAST(ss_ext_sales_price AS DOUBLE)) ELSE 0 END as promotional
            FROM store_sales ss
            INNER JOIN promotion p ON ss.ss_promo_sk = p.p_promo_sk
            inner join item i on ss.ss_item_sk = i.i_item_sk
            inner join store s on ss.ss_store_sk = s.s_store_sk
            inner join customer c on c.c_customer_sk = ss.ss_customer_sk
            inner join customer_address ca
                on c.c_current_addr_sk = ca.ca_address_sk
            WHERE i.i_category IN ({q17_i_category_IN})
            AND s.s_gmt_offset = {q17_gmt_offset}
            AND ca.ca_gmt_offset = {q17_gmt_offset}
            AND ss.ss_sold_date_sk >= {min_date_sk_val}
            AND ss.ss_sold_date_sk <= {max_date_sk_val}
            GROUP BY p_channel_email, p_channel_dmail, p_channel_tv
        ) sum_promotional
        -- we don't need a 'ON' join condition. result is just two numbers.
    """
    result = c.sql(query)
    return result
@annotate("QUERY17", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q17/gpu_bdb_query_17.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
left_semi_join,
run_query,
)
from bdb_tools.q17_utils import (
q17_gmt_offset,
q17_year,
q17_month,
store_sales_cols,
read_tables
)
q17_i_category_IN = "Books", "Music"
def main(client, config):
    """Compute BigBench Q17: promoted vs. total store sales.

    Filters ``store_sales`` down to a fixed month/year, item-category set
    and GMT offset via left semi-joins against the dimension tables, joins
    the surviving rows to ``promotion``, and reports the total sales
    amount, the amount sold under any promotion channel (dmail/email/tv),
    and the promotional percentage.

    Parameters
    ----------
    client : distributed.Client
        Dask client attached to the benchmark cluster (part of the
        standard query-function signature; not used directly here).
    config : dict
        Benchmark configuration produced by ``gpubdb_argparser``.

    Returns
    -------
    cudf.DataFrame
        A single row with columns ``promotional``, ``total`` and
        ``promo_percent``.
    """
    (
        store_sales_df,
        item_df,
        customer_df,
        store_df,
        date_dim_df,
        customer_address_df,
        promotion_df,
    ) = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # store_sales ss LEFT SEMI JOIN date_dim dd ON ss.ss_sold_date_sk = dd.d_date_sk
    #   AND dd.d_year = ${q17_year} AND dd.d_moy = ${q17_month}
    filtered_date_df = date_dim_df.query(
        f"d_year == {q17_year} and d_moy == {q17_month}", meta=date_dim_df._meta
    ).reset_index(drop=True)

    ss_date_join = left_semi_join(
        store_sales_df,
        filtered_date_df,
        left_on=["ss_sold_date_sk"],
        right_on=["d_date_sk"],
    )
    ss_date_join = ss_date_join[store_sales_cols]

    # LEFT SEMI JOIN item i ON ss.ss_item_sk = i.i_item_sk
    #   AND i.i_category IN (${q17_i_category_IN})
    filtered_item_df = item_df.loc[
        item_df["i_category"].isin(q17_i_category_IN)
    ].reset_index(drop=True)
    ss_date_item_join = left_semi_join(
        ss_date_join, filtered_item_df, left_on=["ss_item_sk"], right_on=["i_item_sk"]
    )

    # LEFT SEMI JOIN store s ON ss.ss_store_sk = s.s_store_sk
    #   AND s.s_gmt_offset = ${q17_gmt_offset}
    filtered_store_df = store_df.query(
        f"s_gmt_offset == {q17_gmt_offset}", meta=store_df._meta
    ).reset_index(drop=True)
    ss_date_item_store_join = left_semi_join(
        ss_date_item_join,
        filtered_store_df,
        left_on=["ss_store_sk"],
        right_on=["s_store_sk"],
    )

    # (SELECT c.c_customer_sk FROM customer c LEFT SEMI JOIN customer_address ca
    #    ON c.c_current_addr_sk = ca.ca_address_sk
    #    AND ca.ca_gmt_offset = ${q17_gmt_offset}) sub_c
    filtered_customer_address = customer_address_df.query(
        f"ca_gmt_offset == {q17_gmt_offset}"
    ).reset_index(drop=True)
    sub_c = left_semi_join(
        customer_df,
        filtered_customer_address,
        left_on=["c_current_addr_sk"],
        right_on=["ca_address_sk"],
    )

    # sub_c ON ss.ss_customer_sk = sub_c.c_customer_sk
    ss_date_item_store_customer_join = left_semi_join(
        ss_date_item_store_join,
        sub_c,
        left_on=["ss_customer_sk"],
        right_on=["c_customer_sk"],
    )

    # JOIN promotion p ON ss.ss_promo_sk = p.p_promo_sk
    final_df = ss_date_item_store_customer_join.merge(
        promotion_df, left_on="ss_promo_sk", right_on="p_promo_sk", how="inner"
    )

    # Fill nulls because pandas/cudf drop null keys when grouping.
    for channel_col in ("p_channel_email", "p_channel_dmail", "p_channel_tv"):
        final_df[channel_col] = final_df[channel_col].fillna("None")

    # GROUP BY p_channel_email, p_channel_dmail, p_channel_tv
    # At most 27 groups (3*3*3 over ['N', 'Y', 'None']), so materializing the
    # grouped result with .compute() is cheap.
    group_cols = ["p_channel_email", "p_channel_dmail", "p_channel_tv"]
    # Fix: the agg dict previously repeated the "ss_ext_sales_price" key
    # twice; a single sum is all that is needed — "promotional" is derived
    # from "total" below.
    grouped_df = final_df.groupby(by=group_cols).agg({"ss_ext_sales_price": "sum"})
    grouped_df = grouped_df.compute()

    gr_df = grouped_df.reset_index()
    gr_df = gr_df.rename(columns={"ss_ext_sales_price": "total"})

    # CASE WHEN (p_channel_dmail = 'Y' OR p_channel_email = 'Y' OR p_channel_tv = 'Y')
    #   THEN SUM(ss_ext_sales_price) ELSE 0 END as promotional
    prom_flag = (
        (gr_df["p_channel_dmail"] == "Y")
        | (gr_df["p_channel_email"] == "Y")
        | (gr_df["p_channel_tv"] == "Y")
    )
    gr_df["promotional"] = 0
    # Fix: use .loc instead of chained indexing, which can silently assign
    # into a temporary copy instead of gr_df.
    gr_df.loc[prom_flag, "promotional"] = gr_df.loc[prom_flag, "total"]

    total_sum = gr_df["total"].sum()
    prom_sum = gr_df["promotional"].sum()
    # CASE WHEN sum(total) > 0 THEN 100*sum(promotional)/sum(total)
    #   ELSE 0.0 END as promo_percent
    # Guarding on total_sum (the divisor) matches the SQL spec above and
    # avoids a divide-by-zero.
    prom_per = prom_sum / total_sum * 100 if total_sum > 0 else 0

    print("Prom SUM = {}".format(prom_sum))
    print("Prom Per = {}".format(prom_per))
    print("Total SUM = {}".format(total_sum))

    # Wrap the scalars in lists so the constructor builds a one-row frame.
    return cudf.DataFrame(
        {"promotional": [prom_sum], "total": [total_sum], "promo_percent": [prom_per]}
    )
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q17/README.md | # Query 17
In this query, we find the ratio of items sold with and without promotions in a given month and year. Only items in certain
categories sold to customers living in a specific time zone are considered.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q17/gpu_bdb_query_17_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# ------- Q17 ------
# Store/customer time-zone filter (GMT offset, in hours).
q17_gmt_offset = -5.0
# --store_sales date
q17_year = 2001
q17_month = 12
# Category filter, pre-quoted for direct interpolation into a SQL IN (...).
q17_i_category_IN = "'Books', 'Music'"
def read_tables(data_dir, bc):
    """Register the seven Q17 input tables with the BlazingSQL context.

    Each table is backed by the parquet files found under
    ``<data_dir>/<table>/*.parquet``.
    """
    table_names = (
        "store_sales",
        "item",
        "customer",
        "store",
        "date_dim",
        "customer_address",
        "promotion",
    )
    for table_name in table_names:
        bc.create_table(table_name, os.path.join(data_dir, f"{table_name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run BigBench Q17 as SQL on the BlazingSQL context *bc*.

    Reports total store sales, sales made under any promotion channel
    (dmail/email/tv), and the promotional percentage for a fixed
    month/year, item-category set and GMT offset.
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # Resolve the surrogate-key range of the target month first so the main
    # query can filter the fact table on integer keys instead of joining
    # date_dim again.
    query_date = f"""
    select min(d_date_sk) as min_d_date_sk,
    max(d_date_sk) as max_d_date_sk
    from date_dim
    where d_year = {q17_year}
    and d_moy = {q17_month}
    """
    dates_result = bc.sql(query_date).compute()

    min_date_sk_val = dates_result["min_d_date_sk"][0]
    max_date_sk_val = dates_result["max_d_date_sk"][0]

    # Inner query: per-channel-combination totals; outer query: collapse to
    # a single row holding the promotional share.
    query = f"""
    SELECT sum(promotional) as promotional,
    sum(total) as total,
    CASE WHEN sum(total) > 0.0 THEN (100.0 * sum(promotional)) / sum(total)
    ELSE 0.0 END as promo_percent
    FROM
    (
    SELECT p_channel_email,
    p_channel_dmail,
    p_channel_tv,
    SUM( CAST(ss_ext_sales_price AS DOUBLE) ) total,
    CASE WHEN (p_channel_dmail = 'Y' OR p_channel_email = 'Y' OR p_channel_tv = 'Y')
    THEN SUM(CAST(ss_ext_sales_price AS DOUBLE)) ELSE 0 END as promotional
    FROM store_sales ss
    INNER JOIN promotion p ON ss.ss_promo_sk = p.p_promo_sk
    inner join item i on ss.ss_item_sk = i.i_item_sk
    inner join store s on ss.ss_store_sk = s.s_store_sk
    inner join customer c on c.c_customer_sk = ss.ss_customer_sk
    inner join customer_address ca
    on c.c_current_addr_sk = ca.ca_address_sk
    WHERE i.i_category IN ({q17_i_category_IN})
    AND s.s_gmt_offset = {q17_gmt_offset}
    AND ca.ca_gmt_offset = {q17_gmt_offset}
    AND ss.ss_sold_date_sk >= {min_date_sk_val}
    AND ss.ss_sold_date_sk <= {max_date_sk_val}
    GROUP BY p_channel_email, p_channel_dmail, p_channel_tv
    ) sum_promotional
    -- we don't need a 'ON' join condition. result is just two numbers.
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q22/gpu_bdb_query_22_dask_sql.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nvtx import annotate
import numpy as np
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query
)
from bdb_tools.q22_utils import (
q22_date,
q22_i_current_price_min,
q22_i_current_price_max,
read_tables
)
def main(data_dir, client, c, config):
    """Run BigBench Q22 on the dask-sql context *c*.

    First aggregates inventory into before/after totals around the pivot
    date, then filters the aggregate on the after/before ratio.
    """
    benchmark(read_tables, config, c)

    # Filter limit in days: the +/-30-day window around the pivot date,
    # expressed as integer day counts (d_date was converted to days in
    # read_tables via convert_datestring_to_days).
    min_date = np.datetime64(q22_date, "D").astype(int) - 30
    max_date = np.datetime64(q22_date, "D").astype(int) + 30

    # Pivot date as an integer day count, for before/after bucketing.
    d_date_int = np.datetime64(q22_date, "D").astype(int)

    # Acceptable after/before inventory ratio band.
    ratio_min = 2.0 / 3.0
    ratio_max = 3.0 / 2.0

    query = f"""
    SELECT
    w_warehouse_name,
    i_item_id,
    SUM(CASE WHEN d_date - {d_date_int} < 0 THEN inv_quantity_on_hand ELSE 0 END) AS inv_before,
    SUM(CASE WHEN d_date - {d_date_int} >= 0 THEN inv_quantity_on_hand ELSE 0 END) AS inv_after
    FROM
    inventory inv,
    item i,
    warehouse w,
    date_dim d
    WHERE i_current_price BETWEEN {q22_i_current_price_min} AND {q22_i_current_price_max}
    AND i_item_sk = inv_item_sk
    AND inv_warehouse_sk = w_warehouse_sk
    AND inv_date_sk = d_date_sk
    AND d_date >= {min_date}
    AND d_date <= {max_date}
    GROUP BY w_warehouse_name, i_item_id
    """
    intermediate = c.sql(query)
    # Materialize the aggregate as a named table for the second pass.
    c.create_table("intermediate", intermediate, persist=False)

    query_2 = f"""
    SELECT
    w_warehouse_name,
    i_item_id,
    inv_before,
    inv_after
    FROM intermediate
    WHERE inv_before > 0
    AND CAST(inv_after AS DOUBLE) / CAST(inv_before AS DOUBLE) >= {ratio_min}
    AND CAST(inv_after AS DOUBLE) / CAST(inv_before AS DOUBLE) <= {ratio_max}
    ORDER BY w_warehouse_name, i_item_id
    LIMIT 100
    """
    result = c.sql(query_2)
    return result
@annotate("QUERY22", color="green", domain="gpu-bdb")
def start_run():
config = gpubdb_argparser()
client, c = attach_to_cluster(config, create_sql_context=True)
run_query(config=config, client=client, query_func=main, sql_context=c)
if __name__ == "__main__":
start_run()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q22/gpu_bdb_query_22.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
from bdb_tools.q22_utils import (
q22_date,
q22_i_current_price_min,
q22_i_current_price_max,
read_tables
)
def inventory_before_after(df, date):
    """Split on-hand quantity into pre-/post-``date`` columns.

    Adds ``inv_before`` (quantity zeroed where ``d_date >= date``) and
    ``inv_after`` (quantity zeroed where ``d_date < date``) to *df* in
    place and returns it. Intended for use via ``map_partitions``.
    """
    quantity = df["inv_quantity_on_hand"]
    # mask(cond, 0) zeroes exactly the rows the original loc-assignments did.
    df["inv_before"] = quantity.mask(df["d_date"] >= date, 0)
    df["inv_after"] = quantity.mask(df["d_date"] < date, 0)
    return df
def main(client, config):
    """Compute BigBench Q22 with dataframe operations.

    For items in a configured price band, compares per-warehouse/item
    inventory levels in the 30 days before vs. the 30 days after a pivot
    date, keeping pairs whose after/before ratio lies in [2/3, 3/2].

    Returns the first 100 rows, ordered by the (sorted) groupby keys.
    """
    inventory, item, warehouse, date_dim = benchmark(
        read_tables,
        config=config,
        compute_result=config["get_read_time"],
    )

    # Restrict items to the configured price band, then project down to the
    # columns needed for the join.
    item = item.query(
        "i_current_price >= @q22_i_current_price_min and i_current_price<= @q22_i_current_price_max",
        meta=item._meta,
        local_dict={
            "q22_i_current_price_min": q22_i_current_price_min,
            "q22_i_current_price_max": q22_i_current_price_max,
        },
    ).reset_index(drop=True)
    item = item[["i_item_id", "i_item_sk"]]

    output_table = inventory.merge(
        item, left_on=["inv_item_sk"], right_on=["i_item_sk"], how="inner"
    )

    keep_columns = [
        "inv_warehouse_sk",
        "inv_date_sk",
        "inv_quantity_on_hand",
        "i_item_id",
    ]
    output_table = output_table[keep_columns]

    # Filter limit in days: +/-30 days around the pivot date, as integer day
    # counts (d_date was converted to days in read_tables).
    min_date = np.datetime64(q22_date, "D").astype(int) - 30
    max_date = np.datetime64(q22_date, "D").astype(int) + 30

    date_dim = date_dim.query(
        "d_date>=@min_date and d_date<=@max_date",
        meta=date_dim._meta,
        local_dict={"min_date": min_date, "max_date": max_date},
    ).reset_index(drop=True)

    output_table = output_table.merge(
        date_dim, left_on=["inv_date_sk"], right_on=["d_date_sk"], how="inner"
    )
    keep_columns = ["i_item_id", "inv_quantity_on_hand", "inv_warehouse_sk", "d_date"]
    output_table = output_table[keep_columns]

    output_table = output_table.merge(
        warehouse,
        left_on=["inv_warehouse_sk"],
        right_on=["w_warehouse_sk"],
        how="inner",
    )
    keep_columns = ["i_item_id", "inv_quantity_on_hand", "d_date", "w_warehouse_name"]
    output_table = output_table[keep_columns]

    # Split the on-hand quantity into before/after the pivot date, per
    # partition.
    d_date_int = np.datetime64(q22_date, "D").astype(int)
    output_table = output_table.map_partitions(inventory_before_after, d_date_int)
    keep_columns = ["i_item_id", "w_warehouse_name", "inv_before", "inv_after"]
    output_table = output_table[keep_columns]

    output_table = (
        output_table.groupby(by=["w_warehouse_name", "i_item_id"], sort=True)
        .agg({"inv_before": "sum", "inv_after": "sum"})
        .reset_index()
    )

    # Keep pairs whose after/before inventory ratio is within [2/3, 3/2].
    output_table["inv_ratio"] = output_table["inv_after"] / output_table["inv_before"]

    ratio_min = 2.0 / 3.0
    ratio_max = 3.0 / 2.0
    output_table = output_table.query(
        "inv_ratio>=@ratio_min and inv_ratio<=@ratio_max",
        meta=output_table._meta,
        local_dict={"ratio_min": ratio_min, "ratio_max": ratio_max},
    )

    keep_columns = [
        "w_warehouse_name",
        "i_item_id",
        "inv_before",
        "inv_after",
    ]
    output_table = output_table[keep_columns]

    ## for query 22 the results vary after 6 th decimal place
    return output_table.head(100)
if __name__ == "__main__":
from bdb_tools.cluster_startup import attach_to_cluster
config = gpubdb_argparser()
client, bc = attach_to_cluster(config)
run_query(config=config, client=client, query_func=main)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb/queries | rapidsai_public_repos/gpu-bdb/gpu_bdb/queries/q22/gpu_bdb_query_22_sql.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import os
from bdb_tools.cluster_startup import attach_to_cluster
from bdb_tools.utils import (
benchmark,
gpubdb_argparser,
run_query,
)
# -------- Q22 -----------
# Center date of the +/-30-day inventory comparison window.
q22_date = "2001-05-08"
# i_current_price band, kept as strings for direct SQL interpolation.
q22_i_current_price_min = "0.98"
q22_i_current_price_max = "1.5"
def read_tables(data_dir, bc):
    """Register the four Q22 input tables (parquet-backed) with the
    BlazingSQL context *bc*."""
    for table_name in ("inventory", "item", "warehouse", "date_dim"):
        bc.create_table(table_name, os.path.join(data_dir, f"{table_name}/*.parquet"))
def main(data_dir, client, bc, config):
    """Run BigBench Q22 as a single SQL statement on BlazingSQL.

    Compares per-warehouse/item inventory in the 30 days before vs. after
    the pivot date, keeping pairs whose after/before ratio is within
    [0.666667, 1.50].
    """
    benchmark(read_tables, data_dir, bc, dask_profile=config["dask_profile"])

    # NOTE(review): the HAVING clause uses `timestamp '{q22_date}'` without
    # the " 00:00:00" suffix used everywhere else in this query -- presumably
    # the engine parses a bare date literal the same way, but confirm.
    # The `/ 1000000` rescaling applied to every timestampdiff(DAY, ...)
    # result -- TODO confirm the engine's returned units make this necessary.
    query = f"""
    SELECT
    w_warehouse_name,
    i_item_id,
    SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp))
    / 1000000 < 0 THEN inv_quantity_on_hand ELSE 0 END) AS inv_before,
    SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp))
    / 1000000 >= 0 THEN inv_quantity_on_hand ELSE 0 END) AS inv_after
    FROM
    inventory inv,
    item i,
    warehouse w,
    date_dim d
    WHERE i_current_price BETWEEN {q22_i_current_price_min} AND {q22_i_current_price_max}
    AND i_item_sk = inv_item_sk
    AND inv_warehouse_sk = w_warehouse_sk
    AND inv_date_sk = d_date_sk
    AND timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 >= -30
    AND timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 <= 30
    GROUP BY w_warehouse_name, i_item_id
    HAVING SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date}', CAST(d_date || ' 00:00:00' AS timestamp))
    / 1000000 < 0 THEN inv_quantity_on_hand ELSE 0 END) > 0
    AND
    (
    CAST(
    SUM (CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 >= 0 THEN inv_quantity_on_hand ELSE 0 END) AS DOUBLE)
    / CAST( SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 < 0 THEN inv_quantity_on_hand ELSE 0 END)
    AS DOUBLE) >= 0.666667
    )
    AND
    (
    CAST(
    SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 >= 0 THEN inv_quantity_on_hand ELSE 0 END) AS DOUBLE)
    / CAST ( SUM(CASE WHEN timestampdiff(DAY, timestamp '{q22_date} 00:00:00', CAST(d_date || ' 00:00:00' AS timestamp)) / 1000000 < 0 THEN inv_quantity_on_hand ELSE 0 END)
    AS DOUBLE) <= 1.50
    )
    ORDER BY w_warehouse_name, i_item_id
    LIMIT 100
    """
    result = bc.sql(query)
    return result
if __name__ == "__main__":
config = gpubdb_argparser()
client, bc = attach_to_cluster(config, create_blazing_context=True)
run_query(config=config, client=client, query_func=main, blazing_context=bc)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q26_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from bdb_tools.readers import build_reader
Q26_CATEGORY = "Books"
Q26_ITEM_COUNT = 5
def read_tables(config, c=None):
    """Load the store_sales and item tables needed by Q26.

    When a SQL context *c* is supplied, both tables are also registered
    with it (unpersisted). Returns ``(store_sales_ddf, item_ddf)``.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    store_sales = reader.read(
        "store_sales", relevant_cols=["ss_customer_sk", "ss_item_sk"], index=False
    )
    items = reader.read(
        "item", relevant_cols=["i_item_sk", "i_category", "i_class_id"], index=False
    )

    if c:
        for table_name, frame in (("store_sales", store_sales), ("item", items)):
            c.create_table(table_name, frame, persist=False)

    return store_sales, items
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q14_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
def read_tables(config, c=None):
    """Load the Q14 input tables (web_sales, household_demographics,
    web_page, time_dim), registering each with *c* when provided."""
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    web_sales = reader.read(
        "web_sales",
        relevant_cols=["ws_ship_hdemo_sk", "ws_web_page_sk", "ws_sold_time_sk"],
    )
    household_demographics = reader.read(
        "household_demographics", relevant_cols=["hd_demo_sk", "hd_dep_count"]
    )
    web_page = reader.read(
        "web_page", relevant_cols=["wp_web_page_sk", "wp_char_count"]
    )
    time_dim = reader.read("time_dim", relevant_cols=["t_time_sk", "t_hour"])

    if c:
        for table_name, frame in (
            ("household_demographics", household_demographics),
            ("web_page", web_page),
            ("web_sales", web_sales),
            ("time_dim", time_dim),
        ):
            c.create_table(table_name, frame, persist=False)

    return (web_sales, household_demographics, web_page, time_dim)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q22_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
from bdb_tools.utils import convert_datestring_to_days
q22_date = "2001-05-08"
q22_i_current_price_min = 0.98
q22_i_current_price_max = 1.5
def read_tables(config, c=None):
    """Load the Q22 inputs (inventory, item, warehouse, date_dim).

    ``date_dim.d_date`` is converted from a date string to an integer day
    count per partition. When *c* is given, all four tables are also
    registered with it.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    inventory = reader.read(
        "inventory",
        relevant_cols=[
            "inv_item_sk",
            "inv_warehouse_sk",
            "inv_date_sk",
            "inv_quantity_on_hand",
        ],
    )
    item = reader.read(
        "item", relevant_cols=["i_item_id", "i_current_price", "i_item_sk"]
    )
    warehouse = reader.read(
        "warehouse", relevant_cols=["w_warehouse_sk", "w_warehouse_name"]
    )
    date_dim = reader.read("date_dim", relevant_cols=["d_date_sk", "d_date"])
    # Normalize d_date to integer days so downstream date arithmetic works.
    date_dim = date_dim.map_partitions(convert_datestring_to_days)

    if c:
        for table_name, frame in (
            ("inventory", inventory),
            ("item", item),
            ("warehouse", warehouse),
            ("date_dim", date_dim),
        ):
            c.create_table(table_name, frame, persist=False)

    return inventory, item, warehouse, date_dim
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q17_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Q17 parameters.
q17_gmt_offset = -5.0  # store/customer time-zone filter (GMT offset, hours)
# --store_sales date
q17_year = 2001
q17_month = 12

# Column subsets read for each table (projection pushdown).
store_sales_cols = [
    "ss_ext_sales_price",
    "ss_sold_date_sk",
    "ss_store_sk",
    "ss_customer_sk",
    "ss_promo_sk",
    "ss_item_sk",
]
item_cols = ["i_category", "i_item_sk"]
customer_cols = ["c_customer_sk", "c_current_addr_sk"]
store_cols = ["s_gmt_offset", "s_store_sk"]
date_cols = ["d_date_sk", "d_year", "d_moy"]
customer_address_cols = ["ca_address_sk", "ca_gmt_offset"]
promotion_cols = ["p_channel_email", "p_channel_dmail", "p_channel_tv", "p_promo_sk"]
def read_tables(config, c=None):
    """Load the seven Q17 input tables, optionally registering each with
    the SQL context *c*.

    Returns the frames in the order expected by the Q17 query functions:
    (store_sales, item, customer, store, date_dim, customer_address,
    promotion).
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    # Table name -> (dask) dataframe, read with per-table column subsets.
    frames = {
        "store_sales": reader.read("store_sales", relevant_cols=store_sales_cols),
        "item": reader.read("item", relevant_cols=item_cols),
        "customer": reader.read("customer", relevant_cols=customer_cols),
        "store": reader.read("store", relevant_cols=store_cols),
        "date_dim": reader.read("date_dim", relevant_cols=date_cols),
        "customer_address": reader.read(
            "customer_address", relevant_cols=customer_address_cols
        ),
        "promotion": reader.read("promotion", relevant_cols=promotion_cols),
    }

    if c:
        for table_name, frame in frames.items():
            c.create_table(table_name, frame, persist=False)

    return (
        frames["store_sales"],
        frames["item"],
        frames["customer"],
        frames["store"],
        frames["date_dim"],
        frames["customer_address"],
        frames["promotion"],
    )
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q24_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
ws_cols = ["ws_item_sk", "ws_sold_date_sk", "ws_quantity"]
item_cols = ["i_item_sk", "i_current_price"]
imp_cols = [
"imp_item_sk",
"imp_competitor_price",
"imp_start_date",
"imp_end_date",
"imp_sk",
]
ss_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_quantity"]
def read_tables(config, c=None):
    """Load the Q24 inputs (web_sales, item, item_marketprices,
    store_sales), optionally registering them with *c*."""
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    ### read tables
    web_sales = reader.read("web_sales", relevant_cols=ws_cols)
    items = reader.read("item", relevant_cols=item_cols)
    marketprices = reader.read("item_marketprices", relevant_cols=imp_cols)
    store_sales = reader.read("store_sales", relevant_cols=ss_cols)

    if c:
        for table_name, frame in (
            ("web_sales", web_sales),
            ("item", items),
            ("item_marketprices", marketprices),
            ("store_sales", store_sales),
        ):
            c.create_table(table_name, frame, persist=False)

    return web_sales, items, marketprices, store_sales
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q01_utils.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# -------- Q1 -----------
# Item category ids considered (tuple literal).
q01_i_category_id_IN = 1, 2, 3
# -- sf1 -> 11 stores, 90k sales in 820k lines
q01_ss_store_sk_IN = 10, 20, 33, 40, 50
# Co-occurrence threshold -- see the q01 query for exact use.
q01_viewed_together_count = 50
q01_limit = 100  # result row cap

# Column subsets read for each table (projection pushdown).
item_cols = ["i_item_sk", "i_category_id"]
ss_cols = ["ss_item_sk", "ss_store_sk", "ss_ticket_number"]
def read_tables(config, c=None):
    """Load the Q1 inputs (item, store_sales), optionally registering
    them with *c*. Returns ``(item_df, store_sales_df)``."""
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"]
    )

    items = reader.read("item", relevant_cols=item_cols)
    store_sales = reader.read("store_sales", relevant_cols=ss_cols)

    if c:
        for table_name, frame in (("item", items), ("store_sales", store_sales)):
            c.create_table(table_name, frame, persist=False)

    return items, store_sales
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/merge_util.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask
from dask.utils import apply
from dask.highlevelgraph import HighLevelGraph
from dask.dataframe.core import new_dd_object
from dask.dataframe.multi import merge_chunk
def hash_merge(
    lhs,
    left_on,
    rhs,
    right_on,
    how="inner",
    npartitions=None,
    suffixes=("_x", "_y"),
    shuffle=None,
    indicator=False,
):
    """Hash-partitioned merge of two dask dataframes.

    Shuffles both sides on their join keys into the same number of
    partitions, then merges partition i of *lhs* with partition i of
    *rhs*, building the task graph by hand.

    Parameters mirror ``DataFrame.merge``; *npartitions* defaults to the
    larger side's partition count. *shuffle* only participates in the
    output token (it is not forwarded to ``DataFrame.shuffle``).

    Returns a new dask dataframe with unknown divisions.
    """
    if npartitions is None:
        npartitions = max(lhs.npartitions, rhs.npartitions)

    # Co-partition both sides so matching keys land in the same partition.
    lhs2 = lhs.shuffle(on=left_on, npartitions=npartitions)
    rhs2 = rhs.shuffle(on=right_on, npartitions=npartitions)

    kwargs = dict(
        how=how,
        left_on=left_on,
        right_on=right_on,
        suffixes=suffixes,
        indicator=indicator,
    )
    # Merge the non-empty metas so the result meta carries real dtypes.
    meta = lhs._meta_nonempty.merge(rhs._meta_nonempty, **kwargs)

    # Encode list keys in dask's (callable, args) task-spec form so the
    # scheduler treats them as literal lists, not nested tasks.
    if isinstance(left_on, list):
        left_on = (list, tuple(left_on))
    if isinstance(right_on, list):
        right_on = (list, tuple(right_on))

    token = dask.base.tokenize(lhs2, rhs2, npartitions, shuffle, **kwargs)
    name = "hash-join-" + token

    kwargs["empty_index_dtype"] = meta.index.dtype
    # One merge_chunk task per co-located partition pair.
    dsk = {
        (name, i): (
            dask.utils.apply,
            merge_chunk,
            [(lhs2._name, i), (rhs2._name, i)],
            kwargs,
        )
        for i in range(npartitions)
    }

    # Divisions are unknown after a hash shuffle.
    divisions = [None] * (npartitions + 1)
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[lhs2, rhs2])
    return new_dd_object(graph, name, meta, divisions)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q03_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
from numba import cuda, jit
from bdb_tools.readers import build_reader
# Q03 parameters.
q03_days_in_sec_before_purchase = 864000  # 10 days, in seconds
q03_views_before_purchase = 5
q03_purchased_item_IN = 10001
q03_purchased_item_category_IN = 2, 3  # tuple literal
q03_limit = 100
def read_tables(config, c=None):
    """Load the Q3 inputs (item, web_clickstreams).

    Both tables are registered with *c* when provided, but only the item
    frame is returned; the clickstream frame is consumed through the SQL
    context.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    item_frame = reader.read("item", relevant_cols=["i_category_id", "i_item_sk"])
    clickstream = reader.read(
        "web_clickstreams",
        relevant_cols=[
            "wcs_user_sk",
            "wcs_click_time_sk",
            "wcs_click_date_sk",
            "wcs_item_sk",
            "wcs_sales_sk",
        ],
    )

    if c:
        c.create_table("web_clickstreams", clickstream, persist=False)
        c.create_table("item", item_frame, persist=False)

    return item_frame
@jit(nopython=True)
def find_items_viewed_before_purchase_kernel_both(
    relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i
):
    """
    Shared (CPU/GPU) kernel body for relevant-purchase number ``i``: scan the
    next N rows — which are the *previous* N clicks in time, since the data
    is sorted descending — and record the item of each click that belongs to
    the same user and falls inside the configured look-back window.

    Results go to ``out_col[i * N : (i + 1) * N]``; a slot stays/holds 0 when
    the corresponding click is by another user or outside the time window.
    """
    if i < (relevant_idx_col.size): # boundary guard
        # every relevant row gets N rows in the output, so we need to map the indexes
        # back into their position in the original array
        orig_idx = relevant_idx_col[i]
        current_user = user_col[orig_idx]

        # look at the previous N clicks (assume sorted descending)
        rows_to_check = N
        # never read past the end of the arrays
        remaining_rows = user_col.size - orig_idx
        if remaining_rows <= rows_to_check:
            rows_to_check = remaining_rows - 1
        for k in range(1, rows_to_check + 1):
            if current_user != user_col[orig_idx + k]:
                out_col[i * N + k - 1] = 0
            # only checking relevant purchases via the relevant_idx_col
            elif (timestamp_col[orig_idx + k] <= timestamp_col[orig_idx]) & (
                timestamp_col[orig_idx + k]
                >= (timestamp_col[orig_idx] - q03_days_in_sec_before_purchase)
            ):
                out_col[i * N + k - 1] = item_col[orig_idx + k]
            else:
                out_col[i * N + k - 1] = 0
@cuda.jit
def find_items_viewed_before_purchase_kernel_gpu(
    relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
    """
    Find the past N items viewed after a relevant purchase was made,
    as defined by the configuration of this query.

    GPU entry point: launched with one thread per relevant purchase row
    (via ``forall(size)`` at the call site); each thread delegates its own
    index to the shared kernel body, which also performs the bounds check.
    """
    i = cuda.grid(1)  # global thread index == relevant-row number
    find_items_viewed_before_purchase_kernel_both(relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i)
@jit(nopython=True)
def find_items_viewed_before_purchase_kernel_cpu(
    relevant_idx_col, user_col, timestamp_col, item_col, out_col, N
):
    """
    Find the past N items viewed after a relevant purchase was made,
    as defined by the configuration of this query.

    CPU counterpart of the GPU kernel.  The GPU path launches one thread per
    relevant row (``forall(size)``), so the CPU path must iterate over every
    relevant row explicitly.  The previous implementation invoked the shared
    kernel exactly once with ``i = 0`` and therefore filled only the first N
    output slots, leaving the rest of ``out_col`` untouched.
    """
    for i in range(relevant_idx_col.size):
        find_items_viewed_before_purchase_kernel_both(
            relevant_idx_col, user_col, timestamp_col, item_col, out_col, N, i
        )
def apply_find_items_viewed(df, item_mappings):
    """
    For every relevant purchase in ``df``, collect the items viewed in the
    preceding N clicks (N = ``q03_views_before_purchase``) inside the allowed
    time window, then map those items to ``item_mappings`` on ``i_item_sk``.

    Empty slots are written as 0 by the kernels; they drop out in the inner
    merge (assuming 0 is not a valid ``i_item_sk`` — TODO confirm).
    """
    # need to sort descending to ensure that the
    # next N rows are the previous N clicks
    import pandas as pd
    import numpy as np

    df = df.sort_values(
        by=["wcs_user_sk", "tstamp", "wcs_sales_sk", "wcs_item_sk"],
        ascending=[False, False, False, False],
    )
    df.reset_index(drop=True, inplace=True)
    # a click is "relevant" when it is a real sale of the focus item
    df["relevant_flag"] = (df.wcs_sales_sk != 0) & (
        df.wcs_item_sk == q03_purchased_item_IN
    )
    # remember each row's position so the kernel can look ahead in the
    # full (sorted) arrays
    df["relevant_idx_pos"] = df.index.to_series()
    df.reset_index(drop=True, inplace=True)
    # only allocate output for the relevant rows
    sample = df.loc[df.relevant_flag == True]
    sample.reset_index(drop=True, inplace=True)

    N = q03_views_before_purchase
    size = len(sample)

    # we know this can be int32, since it's going to contain item_sks
    # (`like=` dispatches allocation to NumPy or CuPy to match the column's
    # backing array — NEP 35)
    out_arr = np.zeros(size * N, dtype=df["wcs_item_sk"].dtype, like=df["wcs_item_sk"].values)

    if isinstance(df, cudf.DataFrame):
        # one GPU thread per relevant purchase
        find_items_viewed_before_purchase_kernel_gpu.forall(size)(
            sample["relevant_idx_pos"],
            df["wcs_user_sk"],
            df["tstamp"],
            df["wcs_item_sk"],
            out_arr,
            N,
        )
        result = cudf.DataFrame({"prior_item_viewed": out_arr})
    else:
        find_items_viewed_before_purchase_kernel_cpu(
            sample["relevant_idx_pos"].to_numpy(),
            df["wcs_user_sk"].to_numpy(),
            df["tstamp"].to_numpy(),
            df["wcs_item_sk"].to_numpy(),
            out_arr,
            N,
        )
        result = pd.DataFrame({"prior_item_viewed": out_arr})

    # free the large intermediates before the merge
    del out_arr
    del df
    del sample

    filtered = result.merge(
        item_mappings,
        how="inner",
        left_on=["prior_item_viewed"],
        right_on=["i_item_sk"],
    )
    return filtered
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q21_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Column subsets read for each Q21 table (reads are kept narrow to limit I/O).
store_sales_cols = [
    "ss_item_sk",
    "ss_store_sk",
    "ss_customer_sk",
    "ss_ticket_number",
    "ss_quantity",
    "ss_sold_date_sk",
]
# calendar lookup columns
date_cols = ["d_date_sk", "d_year", "d_moy"]
# online sales
websale_cols = ["ws_item_sk", "ws_bill_customer_sk", "ws_quantity", "ws_sold_date_sk"]
# in-store returns
sr_cols = [
    "sr_item_sk",
    "sr_customer_sk",
    "sr_ticket_number",
    "sr_return_quantity",
    "sr_returned_date_sk",
]
# store and item dimension columns
store_cols = ["s_store_name", "s_store_id", "s_store_sk"]
item_cols = ["i_item_id", "i_item_desc", "i_item_sk"]
def read_tables(config, c=None):
    """Read all six Q21 tables; optionally register them on the SQL context.

    Returns the dataframes as a 6-tuple in the order:
    (store_sales, date_dim, web_sales, store_returns, store, item).
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    store_sales = reader.read("store_sales", relevant_cols=store_sales_cols)
    date_dim = reader.read("date_dim", relevant_cols=date_cols)
    web_sales = reader.read("web_sales", relevant_cols=websale_cols)
    store_returns = reader.read("store_returns", relevant_cols=sr_cols)
    store = reader.read("store", relevant_cols=store_cols)
    item = reader.read("item", relevant_cols=item_cols)

    if c:
        for table_name, frame in (
            ("store_sales", store_sales),
            ("date_dim", date_dim),
            ("item", item),
            ("web_sales", web_sales),
            ("store_returns", store_returns),
            ("store", store),
        ):
            c.create_table(table_name, frame, persist=False)

    return (
        store_sales,
        date_dim,
        web_sales,
        store_returns,
        store,
        item,
    )
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q10_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# sentinel character used as an end-of-line/sentence delimiter in the
# review-text processing pipeline (the same constant appears in the q19
# query script)
eol_char = "è"
def read_tables(config, c=None):
    """Read the ``product_reviews`` table for Q10.

    Row-group splitting is always enabled here (regardless of the config
    value) for better parallelism over the large review-text column.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
        backend=config["backend"],
    )

    reviews = reader.read(
        "product_reviews",
        relevant_cols=["pr_item_sk", "pr_review_content", "pr_review_sk"],
    )

    if c:
        c.create_table("product_reviews", reviews, persist=False)

    return reviews
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q30_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# session timeout in seconds (1 hour)
q30_session_timeout_inSec = 3600
# maximum number of rows in the query output
q30_limit = 40
def read_tables(config, c=None):
    """Read ``item`` and ``web_clickstreams`` for Q30.

    Both tables are registered on the SQL context when one is supplied;
    only the item dataframe is returned.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    items = reader.read("item", relevant_cols=["i_category_id", "i_item_sk"])
    clicks = reader.read(
        "web_clickstreams",
        relevant_cols=[
            "wcs_user_sk",
            "wcs_item_sk",
            "wcs_click_date_sk",
            "wcs_click_time_sk",
        ],
    )

    if c:
        c.create_table('web_clickstreams', clicks, persist=False)
        c.create_table('item', items, persist=False)

    return items
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q13_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
def read_tables(config, c=None):
    """Read the four Q13 tables.

    Returns (date_dim, customer, store_sales, web_sales) dataframes; when a
    SQL context ``c`` is given, all four are registered on it as well.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    date_dim = reader.read("date_dim", relevant_cols=["d_date_sk", "d_year"])
    customer = reader.read(
        "customer",
        relevant_cols=["c_customer_sk", "c_customer_id", "c_first_name", "c_last_name"],
    )
    store_sales = reader.read(
        "store_sales",
        relevant_cols=["ss_sold_date_sk", "ss_customer_sk", "ss_net_paid"],
    )
    web_sales = reader.read(
        "web_sales",
        relevant_cols=["ws_sold_date_sk", "ws_bill_customer_sk", "ws_net_paid"],
    )

    if c:
        c.create_table("date_dim", date_dim, persist=False)
        c.create_table("customer", customer, persist=False)
        c.create_table("store_sales", store_sales, persist=False)
        c.create_table("web_sales", web_sales, persist=False)

    return (date_dim, customer, store_sales, web_sales)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q20_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.dataframe as dd
import dask_cudf
import pandas as pd
from dask import delayed
from bdb_tools.utils import train_clustering_model
from bdb_tools.readers import build_reader
# q20 parameters
N_CLUSTERS = 8  # number of k-means clusters
CLUSTER_ITERATIONS = 20  # iterations per clustering fit
N_ITER = 5  # forwarded to train_clustering_model (presumably n_init; confirm)
def read_tables(config, c=None):
    """Read ``store_sales`` and ``store_returns`` for Q20.

    Returns the two dataframes (sales first); both are registered on the
    SQL context when one is supplied.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    sales_columns = [
        "ss_customer_sk",
        "ss_ticket_number",
        "ss_item_sk",
        "ss_net_paid",
    ]
    returns_columns = [
        "sr_item_sk",
        "sr_customer_sk",
        "sr_ticket_number",
        "sr_return_amt",
    ]

    sales = reader.read("store_sales", relevant_cols=sales_columns)
    returns = reader.read("store_returns", relevant_cols=returns_columns)

    if c:
        c.create_table("store_sales", sales, persist=False)
        c.create_table("store_returns", returns, persist=False)

    return sales, returns
def get_clusters(client, ml_input_df, feature_cols):
    """
    Takes the dask client, kmeans_input_df and feature columns.
    Returns a dictionary matching the output required for q20:
    the model results with ``cid_labels`` replaced by a frame of
    ``(user_sk, label)`` pairs aligned to ``ml_input_df``.
    """
    # NOTE(review): `client.compute(*ml_tasks, sync=True)` is treated as a
    # single result dict, which assumes the frame has one partition —
    # confirm upstream repartitioning.
    ml_tasks = [
        delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
        for df in ml_input_df[feature_cols].to_delayed()
    ]
    results_dict = client.compute(*ml_tasks, sync=True)

    labels = results_dict["cid_labels"]

    # wrap the label array in a dask collection matching the input backend
    if isinstance(ml_input_df, dask_cudf.DataFrame):
        labels_final = dask_cudf.from_cudf(labels, npartitions=ml_input_df.npartitions)
    else:
        labels_final = dd.from_pandas(pd.DataFrame(labels), npartitions=ml_input_df.npartitions)
    # after reset_index, positional column 0 holds the cluster id
    ml_input_df["label"] = labels_final.reset_index()[0]

    output = ml_input_df[["user_sk", "label"]]
    results_dict["cid_labels"] = output
    return results_dict
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/cupy_metrics.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
def cupy_precision_score(y, y_pred):
    """
    Precision for a two-class model whose positive class is labelled 1.

    Among the samples predicted positive, counts how many were truly
    positive (TP) versus not (FP) and returns TP / (TP + FP) as a Python
    float.  Implementation taken from rapidsai/cuml#1522.
    """
    predicted_positive = cp.where(y_pred == 1)
    hits = y_pred[predicted_positive] == y[predicted_positive]
    tp_sum = hits.sum()
    fp_sum = (~hits).sum()
    return (tp_sum / (tp_sum + fp_sum)).item()
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q11_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
def read_tables(config, c=None):
    """Read ``product_reviews``, ``web_sales`` and ``date_dim`` for Q11.

    Returns the three dataframes in that order; all three are registered on
    the SQL context when one is supplied.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    reviews = reader.read(
        "product_reviews", relevant_cols=["pr_review_rating", "pr_item_sk"]
    )
    # we only read int columns here so it should scale up to sf-10k as just 26M rows
    reviews = reviews.repartition(npartitions=1)

    web_sales = reader.read(
        "web_sales", relevant_cols=["ws_sold_date_sk", "ws_net_paid", "ws_item_sk"]
    )
    dates = reader.read("date_dim", relevant_cols=["d_date_sk", "d_date"])

    if c:
        c.create_table("web_sales", web_sales, persist=False)
        c.create_table("product_reviews", reviews, persist=False)
        c.create_table("date_dim", dates, persist=False)

    return (reviews, web_sales, dates)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/sessionization.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import numpy as np
def get_session_id_from_session_boundry(session_change_df, last_session_len):
    """
    This function returns session starts given a session change df.

    Parameters
    ----------
    session_change_df : frame with one row per session boundary, whose
        ``t_index`` column gives the boundary's position in the full,
        sorted click frame.
    last_session_len : number of clicks in the final session (it has no
        following boundary to diff against).

    Returns a series of session ids, one entry per original click row.
    """
    import cudf
    import pandas as pd

    ## we dont really need the `session_id` to start from 0
    ## the total number of sessions per partition should be fairly limited
    ## and we really should not hit 2,147,483,647 sessions per partition
    ## Can switch to vec_arange code to match spark 1-1
    if isinstance(session_change_df, cudf.DataFrame):
        user_session_ids = cp.arange(len(session_change_df), dtype=np.int32)
    else:
        user_session_ids = np.arange(len(session_change_df), dtype=np.int32)

    ### up shift the session length df
    session_len = session_change_df["t_index"].diff().reset_index(drop=True)
    session_len = session_len.shift(-1)
    session_len.iloc[-1] = last_session_len

    # repeat each session id once per click belonging to that session
    if isinstance(session_change_df, cudf.DataFrame):
        session_id_final_series = (
            cudf.Series(user_session_ids).repeat(session_len).reset_index(drop=True)
        )
    else:
        session_id_final_series = (
            pd.Series(user_session_ids).repeat(session_len).reset_index(drop=True)
        )
    return session_id_final_series
def get_session_id(df, keep_cols, time_out):
    """
    This function creates a session id column for each click.

    The session id grows by one for each of a user's subsequent sessions.
    A session boundary occurs when the user changes or when the gap between
    consecutive clicks exceeds ``time_out`` seconds.  ``df`` must already be
    sorted by user and timestamp.
    """
    import cudf

    # first row of each user starts a session (diff is NaN -> True)
    df["user_change_flag"] = df["wcs_user_sk"].diff(periods=1) != 0
    df["user_change_flag"] = df["user_change_flag"].fillna(True)
    df["session_timeout_flag"] = df["tstamp_inSec"].diff(periods=1) > time_out
    df["session_timeout_flag"] = df["session_timeout_flag"].fillna(False)

    df["session_change_flag"] = df["session_timeout_flag"] | df["user_change_flag"]

    # print(f"Total session change = {df['session_change_flag'].sum():,}")
    # copy so the caller's list is not mutated
    keep_cols = list(keep_cols)
    keep_cols += ["session_change_flag"]
    df = df[keep_cols]

    df = df.reset_index(drop=True)
    # positional index of every click within the partition
    if isinstance(df, cudf.DataFrame):
        df["t_index"] = cp.arange(start=0, stop=len(df), dtype=np.int32)
    else:
        df["t_index"] = np.arange(start=0, stop=len(df), dtype=np.int32)

    session_change_df = df[df["session_change_flag"]].reset_index(drop=True)
    # clicks after the last boundary form the final session
    last_session_len = len(df) - session_change_df["t_index"].iloc[-1]

    session_ids = get_session_id_from_session_boundry(
        session_change_df, last_session_len
    )

    assert len(session_ids) == len(df)
    return session_ids
def get_sessions(df, keep_cols, time_out=3600):
    """
    Sort clicks per user chronologically and attach a ``session_id`` column.

    Parameters
    ----------
    df : cudf/pandas DataFrame containing at least ``wcs_user_sk`` and
        ``tstamp_inSec``.
    keep_cols : columns to retain in the output (``session_id`` is appended
        to the result, not to this list).
    time_out : gap in seconds that starts a new session.

    Fix: the previous ``keep_cols += ["session_id"]`` appended to the
    caller's list object in place, growing it on every call when the same
    list was reused (e.g. across partitions).  We now build a fresh list.
    """
    df = df.sort_values(by=["wcs_user_sk", "tstamp_inSec"]).reset_index(drop=True)

    df["session_id"] = get_session_id(df, keep_cols, time_out)
    df = df[list(keep_cols) + ["session_id"]]
    return df
def get_distinct_sessions(df, keep_cols, time_out=3600):
    """
    Sessionize the clicks and drop duplicate rows.

    ### Performence note
    The session + distinct
    logic takes 0.2 seconds for a dataframe with 10M rows
    on gv-100

    Fix: the ``time_out`` argument is now forwarded to ``get_sessions``;
    previously a hard-coded ``3600`` was passed, silently ignoring any
    caller-supplied timeout.
    """
    df = get_sessions(df, keep_cols, time_out=time_out)
    df = df.drop_duplicates().reset_index(drop=True)
    return df
def get_pairs(
    df,
    merge_col=["session_id", "wcs_user_sk"],
    pair_col="i_category_id",
    output_col_1="category_id_1",
    output_col_2="category_id_2",
):
    """
    Self-join ``df`` on ``merge_col`` and return the ordered pairs of
    ``pair_col`` values that co-occur within the same group.

    Only pairs with ``left < right`` are kept, which removes self-pairs and
    counts each unordered pair once per co-occurrence.
    """
    joined = df.merge(df, on=merge_col, suffixes=["_t1", "_t2"], how="inner")

    left_col = f"{pair_col}_t1"
    right_col = f"{pair_col}_t2"
    joined = joined[[left_col, right_col]]

    ordered_mask = joined[left_col] < joined[right_col]
    pairs = joined[ordered_mask].reset_index(drop=True)
    pairs.columns = [output_col_1, output_col_2]
    return pairs
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q08_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import pandas as pd
import cupy as cp
import numpy as np
from bdb_tools.readers import build_reader
q08_STARTDATE = "2001-09-02"  # analysis window start
q08_ENDDATE = "2002-09-02"  # analysis window end
q08_SECONDS_BEFORE_PURCHASE = 259200  # 3 days, in seconds
NA_FLAG = 0  # sentinel written by fillna for missing keys/codes
def read_tables(config, c=None):
    """Read the Q08 tables.

    Returns (date_dim, web_page, web_sales); the clickstream table is read
    and registered on the SQL context (when given) but not returned.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    date_dim_df = reader.read("date_dim", relevant_cols=["d_date_sk", "d_date"])
    web_page_df = reader.read("web_page", relevant_cols=["wp_web_page_sk", "wp_type"])
    web_sales_df = reader.read(
        "web_sales",
        relevant_cols=["ws_net_paid", "ws_order_number", "ws_sold_date_sk"],
    )
    wcs_df = reader.read(
        "web_clickstreams",
        relevant_cols=[
            "wcs_user_sk",
            "wcs_sales_sk",
            "wcs_click_date_sk",
            "wcs_click_time_sk",
            "wcs_web_page_sk",
        ],
    )

    if c:
        c.create_table("web_clickstreams", wcs_df, persist=False)
        c.create_table("web_sales", web_sales_df, persist=False)
        c.create_table("web_page", web_page_df, persist=False)
        c.create_table("date_dim", date_dim_df, persist=False)

    return (date_dim_df, web_page_df, web_sales_df)
def get_session_id_from_session_boundary(session_change_df, last_session_len):
    """
    This function returns session starts given a session change df.

    Unlike the generic sessionization helper, the "session id" here is the
    timestamp (``tstamp_inSec``) of the click that opened the session, so a
    later ``tstamp_inSec - session_id`` yields the time elapsed since the
    session start (used for the purchase-window check downstream).
    """
    user_session_ids = session_change_df.tstamp_inSec

    ### up shift the session length df
    session_len = session_change_df["t_index"].diff().reset_index(drop=True)
    session_len = session_len.shift(-1)
    try:
        session_len.iloc[-1] = last_session_len
    except (AssertionError, IndexError):  # IndexError in numba >= 0.48
        # empty partition: no session boundaries at all
        if isinstance(session_change_df, cudf.DataFrame):
            session_len = cudf.Series([])
        else:
            session_len = pd.Series([])
    # repeat each session id once per click in that session
    if isinstance(session_change_df, cudf.DataFrame):
        session_id_final_series = (
            cudf.Series(user_session_ids).repeat(session_len).reset_index(drop=True)
        )
    else:
        session_id_final_series = (
            pd.Series(user_session_ids).repeat(session_len).reset_index(drop=True)
        )
    return session_id_final_series
def get_session_id(df):
    """
    Create a session id for each click.

    A new session starts whenever the user changes or the click carries a
    ``review_flag``; the session id itself is the timestamp of the boundary
    click (see ``get_session_id_from_session_boundary``).
    """
    # first row of each user starts a session (diff is NaN -> True)
    df["user_change_flag"] = df["wcs_user_sk"].diff(periods=1) != 0
    df["user_change_flag"] = df["user_change_flag"].fillna(True)
    df["session_change_flag"] = df["review_flag"] | df["user_change_flag"]

    df = df.reset_index(drop=True)
    # positional index; `like=` dispatches allocation to NumPy or CuPy to
    # match the column's backing array (NEP 35)
    df["t_index"] = np.arange(start=0, stop=len(df), dtype=np.int32, like=df["wcs_user_sk"].values)

    session_change_df = df[df["session_change_flag"]].reset_index(drop=True)
    try:
        last_session_len = len(df) - session_change_df["t_index"].iloc[-1]
    except (AssertionError, IndexError):  # IndexError in numba >= 0.48
        # empty partition: no boundaries at all
        last_session_len = 0

    session_ids = get_session_id_from_session_boundary(
        session_change_df, last_session_len
    )

    assert len(session_ids) == len(df)
    return session_ids
def get_sessions(df):
    """Order clicks per user chronologically and attach session ids."""
    sort_keys = ["wcs_user_sk", "tstamp_inSec", "wcs_sales_sk", "wp_type_codes"]
    ordered = df.sort_values(by=sort_keys).reset_index(drop=True)
    ordered["session_id"] = get_session_id(ordered)
    return ordered
def get_unique_sales_keys_from_sessions(sessionized, review_cat_code):
    """
    Return the distinct sale keys of purchases made within the allowed
    window after the session start, excluding review-page rows.

    Note: this also writes a ``relevant`` column onto ``sessionized``,
    matching the behaviour of the original implementation.
    """
    # a row is relevant when it is a real (non-NA) sale that happened within
    # q08_SECONDS_BEFORE_PURCHASE of the session anchor timestamp
    within_window = (
        sessionized.tstamp_inSec - sessionized.session_id
    ) <= q08_SECONDS_BEFORE_PURCHASE
    sessionized["relevant"] = within_window & (sessionized.wcs_sales_sk != NA_FLAG)

    keep_mask = (
        (sessionized.wcs_sales_sk != NA_FLAG)
        & (sessionized.relevant == True)
        & (sessionized.wp_type_codes != review_cat_code)
    )
    return sessionized[keep_mask].wcs_sales_sk.unique()
def prep_for_sessionization(df, review_cat_code):
    """
    Keep only the clicks that happened at or after each user's first
    review-page click.

    Rows are NA-filled and sorted per user by time; users with no review
    click fall out of the final filter (their ``first_review_index`` is
    missing, so the comparison is False).
    """
    df = df.fillna(NA_FLAG)
    df = df.sort_values(
        by=["wcs_user_sk", "tstamp_inSec", "wcs_sales_sk", "wp_type_codes"]
    ).reset_index(drop=True)

    review_df = df.loc[df["wp_type_codes"] == review_cat_code]
    # per user, the index of the first review
    # need this to decide if a review was "recent enough"
    every_users_first_review = (
        review_df[["wcs_user_sk", "tstamp_inSec"]]
        .drop_duplicates()
        .reset_index()
        .groupby("wcs_user_sk")["index"]
        .min()
        .reset_index()
    )
    every_users_first_review.columns = ["wcs_user_sk", "first_review_index"]

    # then reset the index to keep the old index before parallel join
    df_merged = df.reset_index().merge(
        every_users_first_review, how="left", on="wcs_user_sk"
    )
    df_filtered = df_merged.query("index >= first_review_index")
    return df_filtered
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q04_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import pandas as pd
from bdb_tools.sessionization import get_sessions
from bdb_tools.readers import build_reader
def read_tables(config, c=None):
    """Read ``web_page`` and ``web_clickstreams`` for Q04.

    Returns (web_page_df, web_clickstreams_df); when a SQL context ``c`` is
    given, both tables are registered on it.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    pages = reader.read("web_page", relevant_cols=["wp_type", "wp_web_page_sk"])
    clicks = reader.read(
        "web_clickstreams",
        relevant_cols=[
            "wcs_user_sk",
            "wcs_click_date_sk",
            "wcs_click_time_sk",
            "wcs_web_page_sk",
            "wcs_sales_sk",
        ],
    )

    if c:
        # name suggests this is the web_page table before its categorical
        # encoding — see the q04 query for how it is used
        c.create_table('web_page_wo_categorical', pages, persist=False)
        c.create_table('web_clickstreams', clicks, persist=False)

    return pages, clicks
def abandonedShoppingCarts(df, DYNAMIC_CAT_CODE, ORDER_CAT_CODE):
    """
    Count sessions whose *last* dynamic/order page hit is a dynamic page —
    i.e. shopping carts that never reached the order page afterwards.

    Returns a one-row dataframe with:
      - ``pagecount``: total clicks across all abandoned-cart sessions
      - ``count``: number of abandoned-cart sessions
    """
    # Select groups where last dynamic row comes after last order row
    filtered_df = df[
        (df["wp_type_codes"] == ORDER_CAT_CODE)
        | (df["wp_type_codes"] == DYNAMIC_CAT_CODE)
    ]
    # Create a new column that is the concatenation of timestamp and wp_type_codes
    # (eg:123456:3, 234567:5)
    filtered_df["wp_type_codes"] = (
        filtered_df["tstamp_inSec"]
        .astype("string")
        .str.cat(filtered_df["wp_type_codes"].astype("string"), sep=":")
    )
    # This gives the last occurrence (by timestamp) within the "order", "dynamic" wp_types
    # NOTE(review): the max is lexicographic on "<tstamp>:<code>", which
    # matches chronological order only while the timestamps have the same
    # number of digits — presumably guaranteed by the data; confirm.
    filtered_df = filtered_df.groupby(
        ["wcs_user_sk", "session_id"], as_index=False, sort=False
    ).agg({"wp_type_codes": "max"})
    # If the max contains dynamic, keep the row else discard.
    last_dynamic_df = filtered_df[
        filtered_df["wp_type_codes"].str.contains(
            ":" + str(DYNAMIC_CAT_CODE), regex=False
        )
    ]
    del filtered_df
    # Find counts for each group
    grouped_count_df = df.groupby(
        ["wcs_user_sk", "session_id"], as_index=False, sort=False
    ).agg({"tstamp_inSec": "count"})
    # Merge counts with the "dynamic" shopping cart groups
    result = last_dynamic_df.merge(
        grouped_count_df, on=["wcs_user_sk", "session_id"], how="inner"
    )
    del (last_dynamic_df, grouped_count_df)
    # return a single aggregate row in the same backend as the input
    if isinstance(df, cudf.DataFrame):
        return cudf.DataFrame(
            {"pagecount": result.tstamp_inSec.sum(), "count": len(result)}
        )
    else:
        return pd.DataFrame(
            {"pagecount": result.tstamp_inSec.sum(), "count": [len(result)]}
        )
def reduction_function(df, keep_cols, DYNAMIC_CAT_CODE, ORDER_CAT_CODE):
    """Per-partition pipeline: sessionize clicks, then count abandoned carts."""
    sessionized = get_sessions(df, keep_cols=keep_cols)
    return abandonedShoppingCarts(
        sessionized,
        DYNAMIC_CAT_CODE=DYNAMIC_CAT_CODE,
        ORDER_CAT_CODE=ORDER_CAT_CODE,
    )
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q23_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
q23_year = 2001  # d_year filter
q23_month = 1  # d_moy filter
q23_coefficient = 1.3  # threshold coefficient (presumably on the coefficient of variation; confirm against the q23 query text)
def read_tables(config, c=None):
    """Read ``date_dim`` and ``inventory`` for Q23.

    Note: unlike most query readers, this one does not forward the
    split_row_groups/backend settings to ``build_reader``.
    """
    reader = build_reader(
        data_format=config["file_format"], basepath=config["data_dir"],
    )

    date_dim = reader.read("date_dim", relevant_cols=["d_date_sk", "d_year", "d_moy"])
    inventory = reader.read(
        "inventory",
        relevant_cols=[
            "inv_warehouse_sk",
            "inv_item_sk",
            "inv_date_sk",
            "inv_quantity_on_hand",
        ],
    )

    if c:
        c.create_table('inventory', inventory, persist=False)
        c.create_table('date_dim', date_dim, persist=False)

    return date_dim, inventory
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q28_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import cupy as cp
import cupy
import cudf
import dask
from cuml.feature_extraction.text import HashingVectorizer
from cuml.dask.naive_bayes import MultinomialNB as DistMNB
from cuml.dask.common import to_dask_cudf
from cuml.dask.common.input_utils import DistributedDataHandler
from distributed import wait
from uuid import uuid1
from bdb_tools.readers import build_reader
# Hashing-vectorizer configuration for the q28 Naive Bayes pipeline.
N_FEATURES = 2 ** 23 # Spark is doing 2^20
ngram_range = (1, 2)  # unigrams and bigrams
preprocessor = lambda s:s.str.lower()  # lowercase the text series before hashing
norm = None  # keep raw term counts (no row normalization)
alternate_sign = False  # all hashed counts stay non-negative
def read_tables(config, c=None):
    """Read ``product_reviews`` for Q28.

    Row-group splitting is always enabled for better parallelism over the
    large review-text column.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
    )

    reviews = reader.read(
        "product_reviews",
        relevant_cols=["pr_review_content", "pr_review_rating", "pr_review_sk"],
    )

    if c:
        c.create_table("product_reviews", reviews, persist=False)

    return reviews
def gpu_hashing_vectorizer(x):
    """Hash a partition of review text into sparse term-count vectors."""
    vectorizer = HashingVectorizer(
        n_features=N_FEATURES,
        alternate_sign=alternate_sign,
        ngram_range=ngram_range,
        norm=norm,
        preprocessor=preprocessor,
    )
    return vectorizer.fit_transform(x)
def map_labels(ser):
    """
    Map star ratings to sentiment classes: ratings 1-2 -> 0 (NEG),
    3 -> 1 (NEUT), anything else (typically 4-5) -> 2 (POS, the default
    fill value).  See ``categoricalize`` for the string labels.
    """
    # start with every row labelled 2 (positive), then overwrite
    output_ser = cudf.Series(cudf.core.column.full(size=len(ser), fill_value=2, dtype=np.int32))
    zero_flag = (ser==1) | (ser==2)
    output_ser.loc[zero_flag]=0
    three_flag = (ser==3)
    output_ser.loc[three_flag]=1
    return output_ser
def build_features(t):
    """
    Vectorize the review text of dask dataframe ``t`` into a persisted dask
    array of hashed term counts (one sparse CSR chunk per partition).
    """
    X = t["pr_review_content"]
    # hash each text partition; the meta describes an (empty) CuPy CSR
    # matrix so dask knows the resulting chunk type
    X = X.map_partitions(
        gpu_hashing_vectorizer,
        meta=dask.array.from_array(
            cupy.sparse.csr_matrix(cupy.zeros(1, dtype=cp.float32))
        ),
    )
    X = X.astype(np.float32).persist()
    # chunk sizes are unknown after map_partitions; materialize them so
    # downstream consumers can index/slice the array
    X.compute_chunk_sizes()
    return X
def build_labels(reviews_df):
    """Map review ratings to sentiment classes; return a persisted dask array."""
    labels = reviews_df["pr_review_rating"].map_partitions(map_labels)
    labels = labels.map_partitions(lambda part: cupy.asarray(part, cupy.int32)).persist()
    labels.compute_chunk_sizes()
    return labels
def categoricalize(num_sr):
    # Map numeric sentiment classes to labels: 0 -> "NEG", 1 -> "NEUT", 2 -> "POS".
    # (list-to-list str.replace as supported by cudf's string methods)
    return num_sr.astype("str").str.replace(["0", "1", "2"], ["NEG", "NEUT", "POS"])
def sum_tp_fp(y_y_pred, nclasses):
    """
    Accumulate per-class true-positive / false-positive counts for one
    data partition.

    Parameters
    ----------
    y_y_pred : tuple of (y, y_pred) CuPy arrays for this partition.
    nclasses : number of classes (labels assumed to be 0..nclasses-1).

    Returns
    -------
    cp.ndarray of shape (nclasses, 2): column 0 holds TP, column 1 FP.

    Fix: the original short-circuit used ``break`` when one class had no
    predicted samples, aborting the loop and leaving every later class'
    counts at zero.  Classes are independent, so we ``continue`` instead
    (the row is already zero-initialized).
    """
    y, y_pred = y_y_pred
    res = cp.zeros((nclasses, 2), order="F")
    for i in range(nclasses):
        pos_pred_ix = cp.where(y_pred == i)[0]
        # no predictions for this class: TP = FP = 0, move to the next class
        if len(pos_pred_ix) == 0:
            continue
        res[i][0] = (y_pred[pos_pred_ix] == y[pos_pred_ix]).sum()
        res[i][1] = (y_pred[pos_pred_ix] != y[pos_pred_ix]).sum()
    return res
def precision_score(client, y, y_pred, average="binary"):
    """Distributed precision metric over dask device arrays.

    average="binary": precision of the highest class (only valid for 2 classes);
    average="macro": unweighted mean of per-class precisions;
    anything else: micro precision (global tp / (tp + fp)).
    Raises ValueError for binary averaging with >2 classes or <2 classes total.
    """
    # Number of distinct labels actually present in y.
    nclasses = len(cp.unique(y.map_blocks(lambda x: cp.unique(x)).compute()))
    if average == "binary" and nclasses > 2:
        raise ValueError
    if nclasses < 2:
        raise ValueError("Single class precision is not yet supported")
    # Fan sum_tp_fp out to the workers that already hold each partition.
    ddh = DistributedDataHandler.create([y, y_pred])
    precision_scores = client.compute(
        [
            client.submit(sum_tp_fp, part, nclasses, workers=[worker])
            for worker, part in ddh.gpu_futures
        ],
        sync=True,
    )
    # Reduce per-partition [tp, fp] counts.
    res = cp.zeros((nclasses, 2), order="F")
    for i in precision_scores:
        res += i
    if average == "binary" or average == "macro":
        prec = cp.zeros(nclasses)
        for i in range(nclasses):
            tp_sum, fp_sum = res[i]
            prec[i] = (tp_sum / (tp_sum + fp_sum)).item()
        if average == "binary":
            return prec[nclasses - 1].item()
        else:
            return prec.mean().item()
    else:
        global_tp = cp.sum(res[:, 0])
        global_fp = cp.sum(res[:, 1])
        # BUGFIX: `.item()` previously bound only to the denominator sum, so a
        # cupy scalar (not a Python float) was returned; parenthesize the ratio.
        return (global_tp / (global_tp + global_fp)).item()
def local_cm(y_y_pred, unique_labels, sample_weight):
    # Confusion matrix for one partition: rows = true class, cols = predicted
    # class, entries weighted by sample_weight (1 when no weights given).
    y_true, y_pred = y_y_pred
    labels = unique_labels
    n_labels = labels.size
    # Assume labels are monotonically increasing for now.
    # intersect y_pred, y_true with labels, eliminate items not in labels
    ind = cp.logical_and(y_pred < n_labels, y_true < n_labels)
    y_pred = y_pred[ind]
    y_true = y_true[ind]
    if sample_weight is None:
        sample_weight = cp.ones(y_true.shape[0], dtype=np.int64)
    else:
        sample_weight = cp.asarray(sample_weight)
        sample_weight = sample_weight[ind]
    # COO construction sums duplicate (true, pred) coordinates, which is
    # exactly the per-cell count/weight accumulation we need.
    cm = cp.sparse.coo_matrix(
        (sample_weight, (y_true, y_pred)), shape=(n_labels, n_labels), dtype=cp.float32,
    ).toarray()
    return cp.nan_to_num(cm)
def confusion_matrix(client, y_true, y_pred, normalize=None, sample_weight=None):
    # Distributed confusion matrix: compute a local matrix per partition on the
    # worker holding it, then sum them on the client.
    unique_classes = cp.unique(y_true.map_blocks(lambda x: cp.unique(x)).compute())
    nclasses = len(unique_classes)
    ddh = DistributedDataHandler.create([y_true, y_pred])
    cms = client.compute(
        [
            client.submit(
                local_cm, part, unique_classes, sample_weight, workers=[worker]
            )
            for worker, part in ddh.gpu_futures
        ],
        sync=True,
    )
    cm = cp.zeros((nclasses, nclasses))
    for i in cms:
        cm += i
    # Optional normalization (mirrors sklearn's `normalize` argument):
    # "true" -> rows sum to 1, "pred" -> columns sum to 1, "all" -> grand total 1.
    # errstate suppresses divide-by-zero warnings; nan_to_num cleans 0/0 cells.
    with np.errstate(all="ignore"):
        if normalize == "true":
            cm = cm / cm.sum(axis=1, keepdims=True)
        elif normalize == "pred":
            cm = cm / cm.sum(axis=0, keepdims=True)
        elif normalize == "all":
            cm = cm / cm.sum()
        cm = cp.nan_to_num(cm)
    return cm
def accuracy_score(client, y, y_hat):
    # Distributed accuracy: count correct predictions per partition on the
    # worker that holds it, then aggregate on the client.
    ddh = DistributedDataHandler.create([y_hat, y])

    def _count_accurate_predictions(y_hat_y):
        # Number of positions where prediction equals label in this partition.
        y_hat, y = y_hat_y
        y_hat = cp.asarray(y_hat, dtype=y_hat.dtype)
        y = cp.asarray(y, dtype=y.dtype)
        return y.shape[0] - cp.count_nonzero(y - y_hat)

    # Unique key prefix so repeated calls don't collide in the scheduler.
    key = uuid1()
    futures = client.compute(
        [
            client.submit(
                _count_accurate_predictions,
                worker_future[1],
                workers=[worker_future[0]],
                key="%s-%s" % (key, idx),
            )
            for idx, worker_future in enumerate(ddh.gpu_futures)
        ],
        sync=True,
    )
    return sum(futures) / y.shape[0]
def post_etl_processing(client, train_data, test_data):
    # Full ML stage: vectorize reviews, train a distributed multinomial naive
    # bayes sentiment classifier, score it, and attach predictions to test_data.
    # Returns (final_data, accuracy, precision, confusion_matrix).
    # Feature engineering
    X_train = build_features(train_data)
    X_test = build_features(test_data)
    y_train = build_labels(train_data)
    y_test = build_labels(test_data)
    # Perform ML
    model = DistMNB(client=client, alpha=0.001)
    model.fit(X_train, y_train)
    ### this regression seems to be coming from here
    y_hat = model.predict(X_test).persist()
    # Compute distributed performance metrics
    acc = accuracy_score(client, y_test, y_hat)
    print("Accuracy: " + str(acc))
    prec = precision_score(client, y_test, y_hat, average="macro")
    print("Precision: " + str(prec))
    cmat = confusion_matrix(client, y_test, y_hat)
    print("Confusion Matrix: " + str(cmat))
    # Place results back in original Dataframe
    # (wrap each prediction partition in a cudf.Series, rebuild a dask-cudf
    # frame, then map class ids to NEG/NEUT/POS strings)
    ddh = DistributedDataHandler.create(y_hat)
    test_preds = to_dask_cudf(
        [client.submit(cudf.Series, part) for w, part in ddh.gpu_futures]
    )
    test_preds = test_preds.map_partitions(categoricalize)
    test_data["prediction"] = test_preds
    final_data = test_data[["pr_review_sk", "pr_review_rating", "prediction"]].persist()
    final_data = final_data.sort_values("pr_review_sk").reset_index(drop=True)
    wait(final_data)
    return final_data, acc, prec, cmat
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q07_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
def read_tables(config, c=None):
    """Read the five tables used by q07 and optionally register them on ``c``.

    Returns (item, store_sales, date_dim, customer, customer_address)
    dataframes restricted to the columns the query needs.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )
    # table name -> columns this query actually touches
    wanted = {
        "item": ["i_item_sk", "i_current_price", "i_category"],
        "store_sales": ["ss_item_sk", "ss_customer_sk", "ss_sold_date_sk"],
        "date_dim": ["d_date_sk", "d_year", "d_moy"],
        "customer": ["c_customer_sk", "c_current_addr_sk"],
        "customer_address": ["ca_address_sk", "ca_state"],
    }
    frames = {
        name: reader.read(name, relevant_cols=cols) for name, cols in wanted.items()
    }
    if c:
        for name, df in frames.items():
            c.create_table(name, df, persist=False)
    return (
        frames["item"],
        frames["store_sales"],
        frames["date_dim"],
        frames["customer"],
        frames["customer_address"],
    )
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q27_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import spacy
from bdb_tools.readers import build_reader
q27_pr_item_sk = 10002
EOL_CHAR = "."
def read_tables(config, c=None):
    """Read the product_reviews table for q27.

    Splits by row groups for better parallelism; registers the table on the
    SQL context ``c`` when one is supplied.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
    )
    wanted_cols = ["pr_item_sk", "pr_review_content", "pr_review_sk"]
    reviews = reader.read("product_reviews", relevant_cols=wanted_cols)
    if c:
        c.create_table("product_reviews", reviews, persist=False)
    return reviews
def ner_parser(df, col_string, batch_size=256):
    """Annotate ``df`` with a ``company_name_list`` column.

    Runs spaCy NER (on GPU) over ``df[col_string]`` and stores, per row, a
    comma-separated string of the entities labelled ORG.
    """
    spacy.require_gpu()
    nlp = spacy.load("en_core_web_sm")
    docs = nlp.pipe(df[col_string], disable=["tagger", "parser"], batch_size=batch_size)
    df["company_name_list"] = [
        ", ".join(ent.text for ent in doc.ents if ent.label_ == "ORG")
        for doc in docs
    ]
    return df
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q29_utils.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
q29_limit = 100
def read_tables(config, c=None):
    """Read the item and web_sales columns needed by q29.

    Registers both frames on the SQL context ``c`` when one is supplied.
    """
    reader = build_reader(
        data_format=config["file_format"], basepath=config["data_dir"], backend=config["backend"],
    )
    item_df = reader.read("item", relevant_cols=["i_item_sk", "i_category_id"])
    ws_df = reader.read("web_sales", relevant_cols=["ws_order_number", "ws_item_sk"])
    if c:
        c.create_table('item', item_df, persist=False)
        c.create_table('web_sales', ws_df, persist=False)
    return item_df, ws_df
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q27_bert_utils.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Copyright (c) 2019-2020, BlazingSQL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import numpy as np
import torch
import cudf
import time
from torch.utils.dlpack import from_dlpack
from dask.distributed import get_worker
def run_inference_on_df(
    df,
    model,
    vocab_hash_file,
    batchsize=64,
    sequence_len_th_ls=[512, 256, 128, 64, 32, 16, 8],
):
    """
    The function has following steps:
    a. Segregate df based on sequence_length (we do this because inference time is prop to sequence length)
    b. For each part we run tokenization
    c. For each part we run inference using the passed model
    Parameters
    ----------
    df: df to run inference on
    model: model object to run it with
    vocab_hash_file: vocab hash file used by the subword tokenizer
    batchsize: batch size
    sequence_len_th_ls: list of sequence-length buckets to create batches with
        (note: a mutable default, but it is only read, never mutated)
    Returns
    -------
    Returns token_d,prediction_d with key=seq_len
    """
    ## Get max sequence_length for a particular review
    df = append_seq_len(df, sequence_len_th_ls, vocab_hash_file)
    ### Tokenize dataframe
    token_d = {}
    max_seq_len = max(sequence_len_th_ls)
    ### Partition each df by sequence length
    for sequence_len, sub_df in get_df_partitioned_by_seq(
        df, sequence_len_th_ls
    ).items():
        # Only the largest bucket can overflow into multiple windows, so only
        # it uses the overlapping stride; smaller buckets fit in one window.
        if sequence_len == max_seq_len:
            stride = get_stride(max_seq_len)
        else:
            # -2 for padding
            stride = sequence_len - 2
        token_d[sequence_len] = tokenize_text_series(
            sub_df["pr_review_content"],
            sequence_len,
            stride,
            vocab_hash_file=vocab_hash_file,
        )
        # Keep the source rows next to their tokens for later joins.
        token_d[sequence_len]["df"] = sub_df
    del df
    ## Run Inference
    prediction_d = {}
    for seqlen, batch_d in token_d.items():
        prediction_d[seqlen] = run_inference_on_tensor(
            model, batch_d["token_ar"], batch_d["attention_ar"], batchsize
        )
    return token_d, prediction_d
## ----Tokenization Utils----
def append_seq_len(df, sequence_len_ls, vocab_hash_file):
    """
    Appends the sequence length for each review to tokenize to.
    The sequence length chosen is the smallest bucket in ``sequence_len_ls``
    that fits the review (capped at the largest bucket).

    Parameters:
    ____________
    df: input dataframe (must have pr_review_sk, pr_item_sk, pr_review_content)
    sequence_len_ls: candidate sequence lengths
    vocab_hash_file: vocab hash_file to use
    """
    df["input_id"] = cp.arange(0, len(df), dtype=np.int32)
    ### here stride is set to ensure non repeated rows as we want to gather sequence_length
    ### -2 because of padding of special chars
    d = tokenize_text_series(
        df["pr_review_content"],
        max(sequence_len_ls),
        stride=max(sequence_len_ls) - 2,
        vocab_hash_file=vocab_hash_file,
    )
    # Bucket each tokenized window, then take the max bucket per review
    # (a review split across windows needs its largest window's bucket).
    seq_len_df = get_seq_len_df(d["metadata"], sequence_len_ls)
    seq_len_df = df[["pr_review_sk", "pr_item_sk", "input_id"]].merge(seq_len_df)
    seq_len_df = seq_len_df.groupby("pr_review_sk").sequence_len.max()
    seq_len_df = seq_len_df.reset_index(drop=False)
    df = df.merge(seq_len_df)
    output_columns = ["pr_review_sk", "pr_review_content", "pr_item_sk", "sequence_len"]
    return df[output_columns]
def get_seq_len_df(metadata, sequence_len_ls):
    """
    Returns the sequence_length from the sequence_len_ls to be used
    for inference
    Args:
        metadata: nx3 cupy array(input_id,start_id,stop_id)
        sequence_len_ls: list of int sequence_lengths we can have
        (eg:[128,256,512])
    Returns:
        a Cudf Dataframe ([input_id,start_id,stop_id,sequence_len])
    """
    sequence_len_ls = sorted(sequence_len_ls)
    metadata_df = cudf.DataFrame()
    metadata_df["input_id"] = metadata[:, 0]
    metadata_df["start_id"] = metadata[:, 1]
    metadata_df["stop_id"] = metadata[:, 2]
    # Default to the largest bucket, then shrink: iterating largest -> smallest
    # means the smallest bucket that still fits wins (last write).
    metadata_df["sequence_len"] = max(sequence_len_ls)
    for output_size in sorted(sequence_len_ls, reverse=True):
        output_flag = metadata_df["stop_id"] < output_size
        # FIX: use .loc instead of chained indexing (df[col][mask] = v), which
        # assigns through a temporary and is not guaranteed to write back.
        metadata_df.loc[output_flag, "sequence_len"] = output_size
    return metadata_df
def get_df_partitioned_by_seq(df, sequence_len_ls):
    """Split ``df`` into one dataframe per sequence-length bucket.

    Returns a dict mapping each length in ``sequence_len_ls`` to the rows of
    ``df`` whose ``sequence_len`` column equals that length, index reset.
    """
    return {
        bucket: df[df["sequence_len"] == bucket].reset_index(drop=True)
        for bucket in sequence_len_ls
    }
def tokenize_text_series(text_ser, seq_len, stride, vocab_hash_file):
    """
    This function tokenizes a text series using the bert subword_tokenizer and vocab-hash

    Parameters
    __________
    text_ser: Text Series to tokenize
    seq_len: Sequence Length to use (We add two special tokens for the classification job)
    stride : Stride for the tokenizer
    vocab_hash_file: vocab_hash_file to use (Created using `perfect_hash.py` with compact flag)

    Returns
    _______
    A dictionary with these keys {'token_ar':,'attention_ar':,'metadata':}
    (all None when the input series is empty)
    """
    if len(text_ser) == 0:
        return {"token_ar": None, "attention_ar": None, "metadata": None}
    # Sizing hints for the tokenizer's preallocated buffers.
    max_num_chars = text_ser.str.len().sum() + 1
    max_rows_tensor = len(text_ser) * 2
    # Tokenize to seq_len - 2 so there is room for the [CLS]/[SEP] specials.
    max_length = seq_len - 2
    tokens, attention_masks, metadata = text_ser.str.subword_tokenize(
        vocab_hash_file,
        do_lower=False,
        max_num_strings=max_rows_tensor,
        max_rows_tensor=max_rows_tensor,
        max_num_chars=max_num_chars,
        stride=stride,
        max_length=max_length,
        do_truncate=False,
    )
    del text_ser
    ### reshape metadata into a matrix
    # metadata rows are (input_text_index, start_token, stop_token) — flat
    # output from the tokenizer, reshaped to one row per emitted sequence.
    metadata = metadata.reshape(-1, 3)
    tokens = tokens.reshape(-1, max_length)
    output_rows = tokens.shape[0]
    # Pad each row out to seq_len with room for the special tokens.
    padded_tokens = cp.zeros(shape=(output_rows, seq_len), dtype=np.uint32)
    # Mark sequence start with [CLS] token to mark start of sequence
    padded_tokens[:, 1:-1] = tokens
    padded_tokens[:, 0] = 101
    # Mark end of sequence [SEP]
    # seq_end_col: first column after the last non-zero token in each row,
    # found by argmax over the reversed non-zero mask.
    seq_end_col = padded_tokens.shape[1] - (padded_tokens[:, ::-1] != 0).argmax(1)
    padded_tokens[cp.arange(padded_tokens.shape[0]), seq_end_col] = 102
    del tokens
    ## Attention mask
    attention_masks = attention_masks.reshape(-1, max_length)
    padded_attention_mask = cp.zeros(shape=(output_rows, seq_len), dtype=np.uint32)
    padded_attention_mask[:, 1:-1] = attention_masks
    # Mark sequence start with 1
    padded_attention_mask[:, 0] = 1
    # Mark sequence end with 1
    padded_attention_mask[cp.arange(padded_attention_mask.shape[0]), seq_end_col] = 1
    del seq_end_col
    del attention_masks
    return {
        "token_ar": padded_tokens,
        "attention_ar": padded_attention_mask,
        "metadata": metadata,
    }
## ----Inference Utils----
def run_inference_on_tensor(model, token_ar, attention_ar, batchsize):
    """
    Runs inference using the model for the given token_ar,attention_ar, batchsize

    Parameters:
    __________
    model: model to use
    token_ar: cupy unsigned int array of shape (n_input_seqs x sequence_length) containing subword tokens
    attention_ar: cupy attention unsigned int array of shape (n_input_seqs x sequence_length) containing valid attention mask
    batchsize: batchsize to use

    Returns
    ________
    Per-token argmax label tensor of shape (n_input_seqs x sequence_length),
    or None when token_ar is None (empty bucket).
    """
    if token_ar is None:
        return None
    prediction_ls = []
    batch_st = 0
    # // + 1 can overshoot by one batch; the batch_end == batch_st check below
    # breaks out of that empty trailing batch.
    total_batches = token_ar.shape[0] // batchsize + 1
    with torch.no_grad():
        # Zero-copy cupy -> torch handoff via DLPack.
        token_tensor = from_dlpack(token_ar.astype(np.int32).toDlpack()).long()
        attention_tensor = from_dlpack(attention_ar.astype(np.int32).toDlpack()).long()
        for batch_index in range(0, total_batches):
            batch_st = batch_index * batchsize
            batch_end = min(batch_st + batchsize, token_tensor.shape[0])
            if batch_end == batch_st:
                break
            current_batch_tensor = token_tensor[batch_st:batch_end]
            current_batch_attention = attention_tensor[batch_st:batch_end]
            outputs = model(current_batch_tensor, current_batch_attention)
            # outputs[0]: per-token class logits for this batch.
            prediction_ls.append(outputs[0])
            del current_batch_tensor
            del current_batch_attention
    # Free GPU memory held by the full input tensors before the concat.
    del token_tensor, attention_tensor
    return torch.cat(prediction_ls).argmax(dim=2)
### Stride Utils
def get_stride(seq_len):
    """Return the tokenizer stride for ``seq_len``.

    Half of the usable window (sequence length minus the two special-token
    slots), truncated to an int. Centralized so every stage of the query
    uses the same stride.
    """
    return int((seq_len - 2) * 0.5)
### Model loading utils
def create_vocab_table(vocabpath):
    """
    Create vocabulary lookup tables from a vocab.txt file.

    Parameters:
    ___________
    vocabpath: path of the vocabulary file (one token per line; only the
        first whitespace-separated field of each line is used)

    Returns:
    ___________
    id2vocab: np.array of token strings, indexed by token id
    vocab2id: dict mapping token string -> int id
    """
    with open(vocabpath) as fh:
        tokens = [line.split()[0] for line in fh]
    vocab2id = {token: index for index, token in enumerate(tokens)}
    return np.array(tokens), vocab2id
def load_model(model_path):
    """
    Load a token-classification model from ``model_path`` and return it in
    half precision, on GPU, in eval mode.
    """
    from transformers import AutoModelForTokenClassification
    model = AutoModelForTokenClassification.from_pretrained(model_path)
    model.half()
    model.cuda()
    model.eval()
    return model
def del_model_attribute():
    """Drop the q27 model cached on the current dask worker and reclaim memory.

    No-op when the worker holds no ``q27_model`` attribute.
    """
    import gc
    import torch

    worker = get_worker()
    if hasattr(worker, "q27_model"):
        del worker.q27_model
        torch.cuda.empty_cache()
        gc.collect()
    return
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/text.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import cudf
import pandas as pd
import numpy as np
EOL_CHAR = "è"
def create_sentences_from_reviews(
    df, review_column="pr_review_content", end_of_line_char=EOL_CHAR,
):
    """Explode each review into one row per sentence.

    Splits ``df[review_column]`` on ``end_of_line_char`` and returns a frame
    with columns ``sentence`` and ``review_idx_global_pos`` (int32 review key),
    dropping empty sentences. Works on both cudf and pandas inputs.
    """
    split_sentences = df[review_column].str.split(end_of_line_char)
    frame_cls = cudf.DataFrame if isinstance(df, cudf.DataFrame) else pd.DataFrame
    result = frame_cls(
        {"sentence": split_sentences, "review_idx_global_pos": df.pr_review_sk}
    )
    result = result.explode("sentence", ignore_index=True)
    result["sentence"] = result.sentence.replace('', np.nan)
    result = result.dropna().reset_index(drop=True)
    result["review_idx_global_pos"] = result["review_idx_global_pos"].astype("int32")
    return result
def create_words_from_sentences(
    df,
    sentence_column="sentence",
    global_position_column="sentence_tokenized_global_pos",
    delimiter=" ",
):
    """Explode each sentence into one row per word.

    Strips basic punctuation (``. , ; - "``), trims whitespace, splits on
    ``delimiter``, and returns a frame with columns ``word`` and
    ``sentence_idx_global_pos``; empty words are dropped. Works on both cudf
    and pandas inputs.
    """
    stripped = df[sentence_column]
    for punct in (".", ",", ";", "-", '"'):
        stripped = stripped.str.replace(punct, "", regex=False)
    words = stripped.str.strip().str.split(delimiter)
    frame_cls = cudf.DataFrame if isinstance(df, cudf.DataFrame) else pd.DataFrame
    result = frame_cls(
        {"word": words, "sentence_idx_global_pos": df[global_position_column]}
    )
    result = result.explode("word", ignore_index=True)
    result["word"] = result.word.replace('', np.nan)
    result = result.dropna().reset_index(drop=True)
    return result
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q16_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
websale_cols = [
"ws_order_number",
"ws_item_sk",
"ws_warehouse_sk",
"ws_sold_date_sk",
"ws_sales_price",
]
web_returns_cols = ["wr_order_number", "wr_item_sk", "wr_refunded_cash"]
date_cols = ["d_date", "d_date_sk"]
item_cols = ["i_item_sk", "i_item_id"]
warehouse_cols = ["w_warehouse_sk", "w_state"]
def read_tables(config, c=None):
    """Read the five q16 tables, optionally registering them on ``c``.

    Returns (web_sales, web_returns, date_dim, item, warehouse) restricted to
    the module-level column lists.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )
    # (table name, relevant columns) in the order callers expect them back.
    specs = [
        ("web_sales", websale_cols),
        ("web_returns", web_returns_cols),
        ("date_dim", date_cols),
        ("item", item_cols),
        ("warehouse", warehouse_cols),
    ]
    frames = [reader.read(name, relevant_cols=cols) for name, cols in specs]
    if c:
        for (name, _), df in zip(specs, frames):
            c.create_table(name, df, persist=False)
    return tuple(frames)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q15_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# --store_sales date range
q15_startDate = "2001-09-02"
# --+1year
q15_endDate = "2002-09-02"
q15_store_sk = 10
store_sales_cols = ["ss_sold_date_sk", "ss_net_paid", "ss_store_sk", "ss_item_sk"]
date_cols = ["d_date", "d_date_sk"]
item_cols = ["i_item_sk", "i_category_id"]
def read_tables(config, c=None):
    """Read store_sales, date_dim and item for q15.

    Registers all three tables on the SQL context ``c`` when one is given.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )
    tables = {
        "store_sales": reader.read("store_sales", relevant_cols=store_sales_cols),
        "date_dim": reader.read("date_dim", relevant_cols=date_cols),
        "item": reader.read("item", relevant_cols=item_cols),
    }
    if c:
        for name, df in tables.items():
            c.create_table(name, df, persist=False)
    return tables["store_sales"], tables["date_dim"], tables["item"]
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q27_get_review_sentence_utils.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cudf
import torch
from numba import cuda
from .q27_bert_utils import get_stride
import cupy as cp
import logging
import numpy as np
def get_review_sentence(
    tokenized_d, predicted_label_t, vocab2id, id2vocab, org_labels=[2, 5]
):
    """
    Given a tokenized_d and predicted_label_t
    We return the sentences that contain tokens predicted as ORG
    (labels in ``org_labels``, by default 2 and 5).

    #### Detailed Workflow ####
    ### First we get org_df which contains the location of everything predicted as an organization
    ### Second we get all the sentence boundaries
    ### Third, we find the sentences that correspond to our predicted label
    ### Fourth, From that org_sentences_table, we create a sentence matrix which contains all tokens that will go in that sentence
    ### Fifth, We convert that matrix into strings (This step happens on CPU)
    """
    seq_len = tokenized_d["token_ar"].shape[1]
    stride = get_stride(seq_len)
    # FIX: dropped a duplicated `metadata_df = cudf.DataFrame()` construction.
    metadata_df = cudf.DataFrame()
    metadata_df["input_text_index"] = tokenized_d["metadata"][:, 0]
    ## +1 for clx class
    metadata_df["start_index"] = tokenized_d["metadata"][:, 1] + 1
    metadata_df["stop_index"] = tokenized_d["metadata"][:, 2] + 1
    metadata_df["seq_row"] = cp.arange(len(metadata_df))
    # Boolean mask over predictions: True where the token was labelled ORG.
    pr_label_f = None
    for label in org_labels:
        if pr_label_f is None:
            pr_label_f = predicted_label_t == label
        else:
            pr_label_f = pr_label_f | (predicted_label_t == label)
    ## Step1: Get ORG
    org_df = get_org_df(pr_label_f, metadata_df, seq_len)
    ### Because we have repetitions in our boundaries we
    ### create a valid region boundary to prevent copying
    valid_region = (seq_len - 2) - stride + 1
    ### This gives us all the valid sentence boundaries
    ### Step2: Get Sentence Boundary
    sentence_boundary_df = get_sentence_boundaries(
        metadata_df,
        tokenized_d["token_ar"],
        stride=stride,
        fs_index_ls=[vocab2id["."], vocab2id["##."]],
    )
    ## Step3: df contains the sentences that intersect with org
    ## Sentence containing ORG
    org_senten_df = get_org_sentences(sentence_boundary_df, org_df)
    org_senten_df = org_senten_df.reset_index(drop=False)
    ## Step4: Flatten these sentences and add them to the output matrix
    output_mat = cp.zeros(
        shape=(len(org_senten_df["org_seq_row"]), 1024 * 2), dtype=np.int32
    )
    label_ar = cp.zeros(shape=(len(org_senten_df["org_seq_row"]), 1), dtype=np.int32)
    input_mat = tokenized_d["token_ar"]
    l_r_ar = org_senten_df["l_fs_seq_row"]
    l_c_ar = org_senten_df["l_fs_seq_col"]
    r_r_ar = org_senten_df["r_fs_seq_row"]
    r_c_ar = org_senten_df["r_fs_seq_col"]
    o_r_ar = org_senten_df["org_seq_row"]
    o_c_ar = org_senten_df["org_seq_col"]
    get_output_sen_word_kernel.forall(len(l_r_ar))(
        l_r_ar,
        l_c_ar,
        r_r_ar,
        r_c_ar,
        o_r_ar,
        o_c_ar,
        valid_region,
        input_mat,
        output_mat,
        label_ar,
    )
    output_mat = cp.asnumpy(output_mat)
    label_ar = cp.asnumpy(label_ar).flatten()
    ### Step5: Detokenize the matrix
    ### CPU logic to gather sentences begins here
    sen_ls = []
    target_ls = []
    for row, t_num in zip(output_mat, label_ar):
        s, t = convert_to_sentence(row, t_num, id2vocab)
        sen_ls.append(s)
        target_ls.append(t)
    df = cudf.DataFrame()
    df["review_sentence"] = cudf.Series(sen_ls, dtype="str")
    df["company_name"] = cudf.Series(target_ls, dtype="str")
    df["input_text_index"] = org_senten_df["input_text_index"]
    return df
def get_sentence_boundaries(metadata_df, token_ar, stride, fs_index_ls):
    """
    Given token array and meta-data we create sentence boundaries
    We consider a sentence boundary as one which is at eol-chars (`##.`,`.`) or start/end of a review

    Returns a frame with input_text_index, the boundary's (row, col) in the
    token matrix, and its flattened position row*seq_len + col.
    """
    seq_len = token_ar.shape[1]
    # Mask of every position holding one of the full-stop token ids.
    fullstop_flag = None
    for fs_token_idx in fs_index_ls:
        if fullstop_flag is None:
            fullstop_flag = token_ar == fs_token_idx
        else:
            fullstop_flag = (fullstop_flag) | (token_ar == fs_token_idx)
    fullstop_row, fullstop_col = cp.nonzero(fullstop_flag)
    # First/last token-matrix row belonging to each review: needed to add
    # review-start and review-end as implicit sentence boundaries.
    min_row_df = (
        metadata_df.groupby("input_text_index").seq_row.min().reset_index(drop=False)
    )
    min_row_df.rename(columns={"seq_row": "min_row"}, inplace=True)
    max_row_df = (
        metadata_df.groupby("input_text_index").seq_row.max().reset_index(drop=False)
    )
    max_row_df.rename(columns={"seq_row": "max_row"}, inplace=True)
    metadata_df = metadata_df.merge(min_row_df).merge(max_row_df)
    ### Can filter to only sequences that have the org
    ## if below becomes a bottleneck
    fullstop_df = cudf.DataFrame()
    fullstop_df["seq_row"] = cudf.Series(fullstop_row)
    fullstop_df["fs_seq_col"] = cudf.Series(fullstop_col)
    fullstop_df = fullstop_df.merge(metadata_df)
    fullstop_df.rename(columns={"seq_row": "fs_seq_row"}, inplace=True)
    # Synthetic boundary at the start of every review (col 1, after [CLS]).
    first_row_df = cudf.DataFrame()
    first_row_df["input_text_index"] = min_row_df["input_text_index"]
    first_row_df["fs_seq_row"] = min_row_df["min_row"]
    first_row_df["fs_seq_col"] = 1
    first_row_df["min_row"] = min_row_df["min_row"]
    first_row_df = first_row_df.merge(max_row_df[["input_text_index", "max_row"]])
    # Synthetic boundary at the end of every review (last column).
    last_row_df = cudf.DataFrame()
    last_row_df["input_text_index"] = max_row_df["input_text_index"]
    last_row_df["fs_seq_row"] = max_row_df["max_row"]
    last_row_df["fs_seq_col"] = seq_len - 1
    last_row_df["max_row"] = max_row_df["max_row"]
    last_row_df = last_row_df.merge(min_row_df[["input_text_index", "min_row"]])
    fullstop_df = cudf.concat([fullstop_df, first_row_df, last_row_df])
    ## -2-> for padding
    # Overlapping windows repeat the last `stride` tokens, so only boundaries
    # in the non-overlapping prefix (plus any on a review's final row) count.
    valid_region = (seq_len - 2) - stride + 1
    ### only keep sentences in the valid_region
    valid_flag = fullstop_df["fs_seq_col"] < valid_region
    valid_flag = valid_flag | (fullstop_df["fs_seq_row"] == fullstop_df["max_row"])
    fullstop_df = fullstop_df[valid_flag]
    # Flattened (row-major) position for ordering boundaries across rows.
    fullstop_df["flat_loc_fs"] = (
        fullstop_df["fs_seq_row"] * seq_len + fullstop_df["fs_seq_col"]
    )
    return fullstop_df[["input_text_index", "fs_seq_row", "fs_seq_col", "flat_loc_fs"]]
def get_org_sentences(sentence_boundary_df, org_df):
    """
    Given the sentence_boundary_df and org_df, returns the nearest sentence boundaries that contain org.
    Returns a org_senten_df indexed by flat_loc_org, with the left boundary
    (l_fs_seq_row/col), right boundary (r_fs_seq_row/col) and the ORG token's
    own (org_seq_row/col) position.
    """
    # Pair every ORG token with every boundary of the same review, then keep
    # the closest boundary at-or-before it (left) and strictly after it (right).
    merged_df = sentence_boundary_df.merge(org_df, on="input_text_index")
    merged_df["left_loc"] = merged_df["flat_loc_org"] - merged_df["flat_loc_fs"]
    merged_df["right_loc"] = merged_df["flat_loc_fs"] - merged_df["flat_loc_org"]
    ### Better way to get the closest row/col maybe
    valid_left_loc = (
        merged_df[merged_df["left_loc"] >= 0]
        .sort_values(by=["flat_loc_org", "left_loc"])
        .groupby("flat_loc_org")
        .nth(0)
    )
    cols_2_keep = [
        "input_text_index",
        "fs_seq_row",
        "fs_seq_col",
        "org_seq_row",
        "org_seq_col",
    ]
    valid_left_loc = valid_left_loc[cols_2_keep]
    valid_left_loc.rename(
        columns={"fs_seq_row": "l_fs_seq_row", "fs_seq_col": "l_fs_seq_col"},
        inplace=True,
    )
    valid_left_loc = valid_left_loc.reset_index(drop=False)
    ### Better way to get the closest row/col maybe
    valid_right_loc = (
        merged_df[merged_df["right_loc"] > 0]
        .sort_values(by=["flat_loc_org", "right_loc"])
        .groupby("flat_loc_org")
        .nth(0)
    )
    valid_right_loc.rename(
        columns={"fs_seq_row": "r_fs_seq_row", "fs_seq_col": "r_fs_seq_col"},
        inplace=True,
    )
    valid_right_loc = valid_right_loc[["r_fs_seq_row", "r_fs_seq_col"]].reset_index(
        drop=False
    )
    # Inner merge on flat_loc_org: an ORG token needs both boundaries to form
    # a complete sentence span.
    valid_df = valid_left_loc.merge(valid_right_loc)
    valid_df = valid_df.set_index(["flat_loc_org"])
    return valid_df
@cuda.jit
def get_output_sen_word_kernel(
    start_r_ar,
    start_c_ar,
    end_r_ar,
    end_c_ar,
    t_row_ar,
    t_col_ar,
    valid_region,
    mat,
    output_mat,
    label_ar,
):
    """
    Fills the output_matrix and label_ar for each review sentence.

    One thread per sentence: copies the non-zero tokens between the sentence's
    left boundary (start_r/start_c) and right boundary (end_r/end_c) out of
    `mat` into row `rnum` of `output_mat`, skipping the per-row overlap past
    `valid_region`. When the copy passes the target ORG token at
    (t_row, t_col), its 1-based output position is recorded in label_ar.
    """
    rnum = cuda.grid(1)
    if rnum < (start_r_ar.size):  # boundary guard
        start_r, start_c = start_r_ar[rnum], start_c_ar[rnum]
        end_r, end_c = end_r_ar[rnum], end_c_ar[rnum]
        t_row, t_col = t_row_ar[rnum], t_col_ar[rnum]
        i = 0
        for curr_r in range(start_r, end_r + 1):
            # First row starts at the left boundary; later rows start at col 1
            # (col 0 holds the [CLS] token).
            if curr_r == start_r:
                col_loop_s = start_c
            else:
                col_loop_s = 1
            # Last row stops at the right boundary; earlier rows stop at the
            # end of their non-overlapping region.
            if curr_r == end_r:
                col_loop_e = end_c
            else:
                col_loop_e = valid_region
            for curr_c in range(col_loop_s, col_loop_e):
                token = mat[curr_r][curr_c]
                if token != 0:  # skip padding
                    output_mat[rnum][i] = token
                    i += 1
                if (curr_r == t_row) and (curr_c == t_col):
                    label_ar[rnum] = i
    return
### CPU part of workflow
def convert_to_sentence(row, target_index, id2vocab):
    """
    Convert a row of token ids back into a sentence string.

    Subtokens (vocabulary entries prefixed with "##") are merged back onto
    the previous word so the original input text is recovered.  Returns a
    tuple ``(sentence, word)`` where ``word`` is the reconstructed word
    associated with ``target_index``.
    """
    nonzero_ids = row[row != 0]
    tokens = id2vocab[nonzero_ids]

    words = []
    target_word_pos = -1
    for pos, tok in enumerate(tokens):
        if pos == target_index:
            # index (in words) of the word the target token attaches to
            target_word_pos = len(words) - 1
        ## We anyways skip the first full-stop and we dont want to combine that
        ### eg: test. new sen ---tokenized-> test ##. new sen
        ### we only will want to capture "new sen"
        if words and tok.startswith("##"):
            words[-1] += tok[2:]
        else:
            words.append(tok)

    # drop a leading sentence-boundary marker if present
    if words[0] == "." or words[0] == "##.":
        sentence = " ".join(words[1:])
    else:
        sentence = " ".join(words)
    return sentence, words[target_word_pos]
def get_org_df(pr_label_f, metadata_df, seq_len):
    """
    Returns the org_df given pr_label_f,metadata_df,

    ``pr_label_f`` is a dense torch tensor of predicted labels; its nonzero
    entries give the (row, col) position of each prediction.  Positions are
    joined with ``metadata_df``, flattened into a single ``flat_loc_org``
    index, and predictions outside [start_index, stop_index] are dropped.
    """
    rows, cols = torch.nonzero(pr_label_f, as_tuple=True)

    positions = cudf.DataFrame()
    positions["seq_row"] = cudf.Series(rows)
    positions["org_seq_col"] = cudf.Series(cols)

    positions = positions.merge(metadata_df).rename(
        columns={"seq_row": "org_seq_row"}
    )
    positions["flat_loc_org"] = (
        positions["org_seq_row"] * seq_len + positions["org_seq_col"]
    )

    ### Trim overlapping and invalid predictions
    in_window = (positions["org_seq_col"] >= positions["start_index"]) & (
        positions["org_seq_col"] <= positions["stop_index"]
    )
    positions = positions[in_window]
    return positions[["org_seq_row", "org_seq_col", "input_text_index", "flat_loc_org"]]
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q18_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import numpy as np
import cudf
import pandas as pd
from cudf._lib.strings import find_multiple
from bdb_tools.readers import build_reader
# Start of the Q18 date window -- presumably applied to store_sales; confirm
# against the q18 query body.
q18_startDate = "2001-05-02"
# --+90days: end of the window, the start date plus roughly 90 days
q18_endDate = "2001-09-02"
# Delimiter character used when flattening review text -- chosen as a
# character unlikely to appear in the reviews themselves.
EOL_CHAR = "è"
def read_tables(config, c=None):
    """
    Load the tables Q18 needs and optionally register them on the SQL
    context ``c``.

    Returns (store_sales, date_dim, store, product_reviews).
    """
    reader = build_reader(
        data_format=config["file_format"], basepath=config["data_dir"], backend=config["backend"],
    )

    store_sales = reader.read(
        "store_sales",
        relevant_cols=["ss_store_sk", "ss_sold_date_sk", "ss_net_paid"],
    )
    date_dim = reader.read("date_dim", relevant_cols=["d_date_sk", "d_date"])
    store = reader.read("store", relevant_cols=["s_store_sk", "s_store_name"])

    ### splitting by row groups for better parallelism
    reviews_reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
        backend=config["backend"],
    )
    product_reviews = reviews_reader.read(
        "product_reviews",
        relevant_cols=["pr_review_date", "pr_review_content", "pr_review_sk"],
    )

    if c:
        c.create_table("store", store, persist=False)
        c.create_table("store_sales", store_sales, persist=False)
        c.create_table("date_dim", date_dim, persist=False)
        c.create_table("product_reviews", product_reviews, persist=False)

    return store_sales, date_dim, store, product_reviews
def create_found_reshaped_with_global_pos(found, targets):
    """Explode the nonzero hits of ``found`` into one row per (word, review).

    ``found`` has one row per review, one column per target word, plus a
    ``pr_review_sk`` column; each nonzero cell becomes its own output row
    carrying the matched word and the review's ``pr_review_sk``.
    Having these as two separate functions makes managing dask metadata easier.
    """
    on_gpu = isinstance(found, cudf.DataFrame)
    frame_type = cudf.DataFrame if on_gpu else pd.DataFrame

    # map column order -> target word
    target_df = frame_type({"word": targets}).reset_index(drop=False)
    target_df.columns = ["word_mapping", "word"]

    hit_rows, hit_cols = found.drop(["pr_review_sk"], axis=1).values.nonzero()
    found_reshaped = frame_type(
        {
            "word_mapping": hit_cols,
            "pr_review_sk": found["pr_review_sk"].iloc[hit_rows],
        }
    )
    merged = found_reshaped.merge(target_df, on="word_mapping", how="inner")
    return merged[["word", "pr_review_sk"]]
def pandas_find_multiple(lowered, targets):
    """
    Pandas fallback for cudf's find_multiple: for each string in
    ``lowered``, return a list holding the first-occurrence index of every
    target (-1 when absent) -- one inner list per input string.
    """
    per_target = [lowered.str.find(target) for target in targets]
    return [list(row) for row in zip(*per_target)]
def find_targets_in_reviews_helper(ddf, targets, str_col_name="pr_review_content"):
    """returns a N x K matrix, where N is the number of rows in ddf that
    contain one of the target words and K is the number of words in targets.
    If a target word is found in a review, the value in that row, column
    is non-zero.
    At the end, any row with non-zero values is returned.
    """
    lowered = ddf[str_col_name].str.lower()
    ## TODO: Do the replace/any in cupy land before going to cuDF
    if isinstance(ddf, cudf.DataFrame):
        # GPU path: find_multiple yields, per review, the first-match index
        # of every target; explode + reshape gives the N x K matrix.
        resdf = cudf.DataFrame(
            cp.asarray(
                cudf.Series(find_multiple.find_multiple(lowered._column, targets._column)).explode()
            ).reshape(-1, len(targets))
        )
    else:
        # CPU fallback mirrors the GPU path via pandas_find_multiple
        resdf = pd.DataFrame(
            np.asarray(
                pd.Series(pandas_find_multiple(lowered, targets)).explode()
            ).reshape(-1, len(targets))
        )
    # str.find semantics: -1 = not found, 0 = match at position 0.  Map
    # 0 -> 1 and -1 -> 0 so "nonzero" uniformly means "target present"
    # (matches at positions > 0 are already truthy).
    resdf = resdf.replace([0, -1], [1, 0])
    found_mask = resdf.any(axis=1)
    resdf["pr_review_sk"] = ddf["pr_review_sk"].reset_index(drop=True)
    found = resdf.loc[found_mask]
    return create_found_reshaped_with_global_pos(found, targets)
def find_relevant_reviews(df, targets, str_col_name="pr_review_content"):
    """
    This function finds the reviews containing target stores and returns the
    relevant reviews as (word, pr_review_sk, pr_review_date) rows.

    Fix: ``str_col_name`` was previously accepted but never forwarded to
    find_targets_in_reviews_helper, so a non-default column name was
    silently ignored.
    """
    if isinstance(df, cudf.DataFrame):
        targets = cudf.Series(targets)
    else:
        targets = pd.Series(targets)
    targets_lower = targets.str.lower()
    reviews_found = find_targets_in_reviews_helper(
        df, targets_lower, str_col_name=str_col_name
    )[["word", "pr_review_sk"]]

    combined = reviews_found.merge(
        df[["pr_review_date", "pr_review_sk"]], how="inner", on=["pr_review_sk"]
    )

    return combined
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q02_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Q02 parameters (values fixed by the benchmark specification).
q02_item_sk = 10001  # anchor item -- presumably the item whose co-viewed items are counted
q02_limit = 30  # number of top results to report -- TODO confirm against the query
q02_session_timeout_inSec = 3600  # click gap (seconds) that ends a session -- TODO confirm
q02_MAX_ITEMS_PER_BASKET = 5000000  # upper bound used during sessionization -- TODO confirm
def read_tables(config, c=None):
    """
    Load the web_clickstreams columns Q02 needs; optionally register the
    table on the SQL context ``c``.  Returns the clickstream dataframe.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    clickstream_cols = [
        "wcs_user_sk",
        "wcs_item_sk",
        "wcs_click_date_sk",
        "wcs_click_time_sk",
    ]
    wcs_df = reader.read("web_clickstreams", relevant_cols=clickstream_cols)

    if c:
        c.create_table("web_clickstreams", wcs_df, persist=False)

    return wcs_df
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/readers.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABC, abstractmethod
import os
# Every table directory expected under a GPU-BDB dataset base path.
# NOTE(review): "parquet" looks out of place among table names -- confirm
# whether it is intentional before relying on it.
TABLE_NAMES = [
    "customer",
    "customer_address",
    "customer_demographics",
    "date_dim",
    "household_demographics",
    "income_band",
    "inventory",
    "item",
    "item_marketprices",
    "parquet",
    "product_reviews",
    "promotion",
    "reason",
    "ship_mode",
    "store",
    "store_returns",
    "store_sales",
    "time_dim",
    "warehouse",
    "web_clickstreams",
    "web_page",
    "web_returns",
    "web_sales",
    "web_site",
]

### these tables with non string columns should easily fit on 1 gpu
## At sf-100k, product_reviews=200M , customer=31M,
## customer_address=15M, item=5M, item_market_prices=28M
## in most queries apart from nlp ones we dont read string columns
## so these should scale
## see https://github.com/rapidsai/gpu-bdb/issues/66 for size details
SMALL_TABLES = ["customer", "customer_address", "item", "item_marketprices"]

### these tables are not expected to grow with scale factors
## these should fit easily with all columns on a single gpu
## see https://github.com/rapidsai/gpu-bdb/issues/66 for size details
SUPER_SMALL_TABLES = [
    "date_dim",
    "time_dim",
    "web_site",
    "income_band",
    "ship_mode",
    "household_demographics",
    "promotion",
    "web_page",
    "warehouse",
    "reason",
    "store",
]
class Reader(ABC):
    """Base class for GPU-BDB File Readers"""

    @abstractmethod
    def read(self, filepath, **kwargs):
        """Read a table; implemented by format-specific subclasses."""

    @abstractmethod
    def show_tables(self):
        """Return the known table names; implemented by subclasses."""
class ParquetReader(Reader):
    """Read GPU-BDB Parquet data

    Parameters
    ----------
    basepath : str
        Root directory containing one sub-directory of parquet files per
        table in TABLE_NAMES.
    split_row_groups : bool, default False
        When truthy, partition each file by row group (this requires
        gathering parquet statistics).
    backend : {"GPU", "CPU"}, default "GPU"
        Selects dask_cudf or plain dask.dataframe as the reading backend.
    """

    def __init__(
        self, basepath, split_row_groups=False, backend="GPU",
    ):
        if backend == "GPU":
            import dask_cudf

            self.backend = dask_cudf
        else:
            import dask.dataframe

            self.backend = dask.dataframe

        self.table_path_mapping = {
            table: os.path.join(basepath, table, "*.parquet") for table in TABLE_NAMES
        }
        self.split_row_groups = split_row_groups

    def show_tables(self):
        """Return the table names this reader knows how to load."""
        return self.table_path_mapping.keys()

    def read(self, table, relevant_cols=None, **kwargs):
        """Read ``table`` as a dask dataframe, keeping only ``relevant_cols``.

        Fix: the previous implementation duplicated the whole read_parquet
        call in two branches that differed only in the gather_statistics
        flag; statistics are gathered exactly when splitting by row group.
        """
        filepath = self.table_path_mapping[table]
        df = self.backend.read_parquet(
            filepath,
            columns=relevant_cols,
            split_row_groups=self.split_row_groups,
            # we ignore split_row_groups if gather_statistics=False
            gather_statistics=bool(self.split_row_groups),
            **kwargs,
        )

        ## Repartition small tables to a single partition to prevent
        ## distributed merges when possible
        ## Only matters when partition size<3GB
        if (table in SMALL_TABLES) or (table in SUPER_SMALL_TABLES):
            df = df.repartition(npartitions=1)
        return df
class ORCReader(Reader):
    """Read GPU-BDB ORC data"""

    # TODO: not implemented yet -- placeholder so build_reader("orc", ...)
    # can still construct an object.
    def __init__(self, basepath):
        pass
class CSVReader(Reader):
    """Read GPU-BDB CSV data"""

    # TODO: not implemented yet -- placeholder so build_reader("csv", ...)
    # can still construct an object.
    def __init__(self, basepath):
        pass
def build_reader(basepath, data_format="parquet", **kwargs):
    """Construct the Reader subclass that matches ``data_format``.

    Extra keyword arguments are forwarded to the reader's constructor.
    """
    reader_classes = {
        "csv": CSVReader,
        "parquet": ParquetReader,
        "orc": ORCReader,
    }
    assert data_format in reader_classes
    return reader_classes[data_format](basepath=basepath, **kwargs)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/__init__.py | # Copyright (c) 2020, NVIDIA CORPORATION.
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q05_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import cupy as cp
import numpy as np
import cuml
import sklearn
from cuml.metrics import confusion_matrix as cuml_confusion_matrix
from bdb_tools.cupy_metrics import cupy_precision_score
from bdb_tools.readers import build_reader
from sklearn.metrics import (
roc_auc_score,
precision_score,
confusion_matrix
)
# Logistic Regression params
# solver = "LBFGS" Used by passing `penalty=None` or "l2"
# step_size = 1 Not used
# numCorrections = 10 Not used
iterations = 100  # max_iter for the logistic regression
C = 10_000  # reg_lambda = 0 hence C for model is a large value
convergence_tol = 1e-9  # tol passed to the model

# Column subsets read for each table used by Q05
wcs_columns = ["wcs_item_sk", "wcs_user_sk"]
items_columns = ["i_item_sk", "i_category", "i_category_id"]
customer_columns = ["c_customer_sk", "c_current_cdemo_sk"]
customer_dem_columns = ["cd_demo_sk", "cd_gender", "cd_education_status"]
def read_tables(config, c=None):
    """
    Load item, customer, customer_demographics and web_clickstreams for
    Q05; optionally register all four on the SQL context ``c``.

    Returns (item_ddf, customer_ddf, customer_dem_ddf); the clickstream
    frame is only registered on ``c``, matching the original behaviour.
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    item_ddf = reader.read("item", relevant_cols=items_columns, index=False)
    customer_ddf = reader.read(
        "customer", relevant_cols=customer_columns, index=False
    )
    customer_dem_ddf = reader.read(
        "customer_demographics", relevant_cols=customer_dem_columns, index=False
    )
    wcs_ddf = reader.read(
        "web_clickstreams", relevant_cols=wcs_columns, index=False
    )

    if c:
        c.create_table("web_clickstreams", wcs_ddf, persist=False)
        c.create_table("customer", customer_ddf, persist=False)
        c.create_table("item", item_ddf, persist=False)
        c.create_table("customer_demographics", customer_dem_ddf, persist=False)

    return (item_ddf, customer_ddf, customer_dem_ddf)
def build_and_predict_model(ml_input_df):
    """
    Create a standardized feature matrix X and target array y.
    Returns the model and accuracy statistics

    Chooses cuML or scikit-learn for the logistic regression depending on
    whether the input is a cudf or pandas dataframe, then predicts on the
    training data itself and reports AUC, precision and the confusion
    matrix.
    """
    import cudf

    # features: two demographic flags plus clicks_in_1..7 -- presumably
    # per-category click counts built upstream; confirm with the caller
    feature_names = ["college_education", "male"] + [
        "clicks_in_%d" % i for i in range(1, 8)
    ]
    X = ml_input_df[feature_names]
    # Standardize input matrix
    X = (X - X.mean()) / X.std()
    y = ml_input_df["clicks_in_category"]

    # GPU vs CPU model backend chosen by input dataframe type
    if isinstance(ml_input_df, cudf.DataFrame):
        model_backend = cuml.LogisticRegression
    else:
        model_backend = sklearn.linear_model.LogisticRegression

    model = model_backend(
        tol=convergence_tol,
        penalty="none",
        fit_intercept=True,
        max_iter=iterations,
        C=C,
    )
    model.fit(X, y)
    #
    # Predict and evaluate accuracy
    # (Should be 1.0) at SF-1
    #
    results_dict = {}
    y_pred = model.predict(X)
    if isinstance(ml_input_df, cudf.DataFrame):
        # GPU metrics: move labels to host for sklearn's roc_auc_score,
        # use cupy/cuml implementations for precision and confusion matrix
        results_dict["auc"] = roc_auc_score(y.values_host, y_pred.values_host)
        results_dict["precision"] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred))
        results_dict["confusion_matrix"] = cuml_confusion_matrix(
            cp.asarray(y, dtype="int32"), cp.asarray(y_pred, dtype="int32")
        )
    else:
        results_dict["auc"] = roc_auc_score(y.to_numpy(), y_pred)
        results_dict["precision"] = precision_score(y.to_numpy(), y_pred)
        results_dict["confusion_matrix"] = confusion_matrix(y.to_numpy(dtype="int32"), y_pred)
    # marker consumed by write_result to route to the supervised writer
    results_dict["output_type"] = "supervised"
    return results_dict
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/utils.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import os
import shutil
import socket
import re
import argparse
import time
import subprocess
from datetime import datetime
from collections.abc import Iterable
import glob
import dask
import traceback
import yaml
import sys
from collections import OrderedDict
from collections.abc import MutableMapping
import numpy as np
import cudf
import dask_cudf
import pandas as pd
import dask.dataframe as dd
from dask import delayed
from dask.utils import parse_bytes
from dask_cuda import LocalCUDACluster
from dask.distributed import Client, wait, performance_report, SSHCluster
import json
import gspread
from oauth2client.service_account import ServiceAccountCredentials
#################################
# Benchmark Timing
#################################
def benchmark(func, *args, **kwargs):
    """
    Run ``func(*args, **kwargs)`` and record its wall-clock time.

    Keyword-only extras (popped before the call):
      csv: when True (default), write timings to benchmarked_<name>.csv;
           otherwise print them.
      compute_result: when True, force computation of the returned dask
           collection(s) and record the compute time as well.
    """
    to_csv = kwargs.pop("csv", True)
    compute_result = kwargs.pop("compute_result", False)

    started = time.time()
    result = func(*args, **kwargs)

    timings = {
        "elapsed_time_seconds": time.time() - started,
        "function_name": func.__name__,
    }

    if compute_result:
        if isinstance(result, dask_cudf.DataFrame):
            len_tasks = [dask.delayed(len)(df) for df in result.to_delayed()]
        else:
            # a tuple/list of dask dataframes: compute the length of every
            # partition of each one
            len_tasks = []
            for read_df in result:
                len_tasks += [dask.delayed(len)(df) for df in read_df.to_delayed()]

        compute_started = time.time()
        dask.compute(*len_tasks)
        timings["compute_time_seconds"] = time.time() - compute_started

    logdf = pd.DataFrame.from_dict(timings, orient="index").T
    if to_csv:
        logdf.to_csv(f"benchmarked_{func.__name__}.csv", index=False)
    else:
        print(logdf)

    return result
#################################
# Result Writing
#################################
def write_result(payload, filetype="parquet", output_directory="./"):
    """
    Dispatch result writing on the payload type: mapping payloads are ML
    results (supervised when tagged with output_type == "supervised",
    clustering otherwise); dataframe payloads are ETL results.
    """
    if isinstance(payload, MutableMapping):
        if payload.get("output_type", None) == "supervised":
            writer = write_supervised_learning_result
        else:
            writer = write_clustering_result
        writer(
            result_dict=payload,
            filetype=filetype,
            output_directory=output_directory,
        )
    elif isinstance(payload, (cudf.DataFrame, dd.DataFrame, pd.DataFrame)):
        write_etl_result(
            df=payload, filetype=filetype, output_directory=output_directory
        )
    else:
        raise ValueError("payload must be a dict or a dataframe.")
def write_etl_result(df, filetype="parquet", output_directory="./"):
    """
    Write an ETL query result to ``output_directory`` as
    q<N>-results.<filetype>, replacing any previous output.

    CSV output is a directory (dask writes one file per partition);
    parquet output is a directory for dask dataframes or a single file
    otherwise.

    Fixes: the parquet single-file branch rebuilt the identical path
    string instead of reusing ``output_path``; the csv branch performed a
    redundant existence check before mkdir.
    """
    assert filetype in ["csv", "parquet"]
    QUERY_NUM = get_query_number()

    if filetype == "csv":
        output_path = f"{output_directory}q{QUERY_NUM}-results.csv"
        if os.path.exists(output_path):
            shutil.rmtree(output_path)
        # after the rmtree above the path never exists, so mkdir directly
        os.mkdir(output_path)
        df.to_csv(output_path, header=True, index=False)
    else:
        output_path = f"{output_directory}q{QUERY_NUM}-results.parquet"
        if os.path.exists(output_path):
            if os.path.isdir(output_path):
                ## remove existing directory output
                shutil.rmtree(output_path)
            else:
                ## remove existing single parquet file
                os.remove(output_path)

        if isinstance(df, dd.DataFrame):
            df.to_parquet(output_path, write_index=False)
        else:
            df.to_parquet(output_path, index=False)
def write_result_q05(results_dict, output_directory="./", filetype=None):
    """
    Write Q05 metrics (precision, AUC, 2x2 confusion matrix) as a small
    text file.  ``filetype`` is accepted for interface compatibility and
    ignored.
    """
    cm = results_dict["confusion_matrix"]
    lines = [
        "Precision: %f\n" % results_dict["precision"],
        "AUC: %f\n" % results_dict["auc"],
        "Confusion Matrix:\n",
        "%8.1f %8.1f\n%8.1f %8.1f\n" % (cm[0, 0], cm[0, 1], cm[1, 0], cm[1, 1]),
    ]
    with open(f"{output_directory}q05-metrics-results.txt", "w") as outfile:
        outfile.writelines(lines)
def write_supervised_learning_result(result_dict, output_directory, filetype="csv"):
    """
    Persist a supervised-learning result: a metrics text file plus the
    predictions dataframe (csv or parquet).  Q05 has a metrics-only
    format and is delegated to write_result_q05.
    """
    assert filetype in ["csv", "parquet"]
    QUERY_NUM = get_query_number()

    if QUERY_NUM == "05":
        write_result_q05(result_dict, output_directory)
        return

    df = result_dict["df"]
    # strip numpy-style brackets/dots so the matrix prints as bare numbers
    cmat_str = (
        str(result_dict["cmat"]).replace("[", " ").replace("]", " ").replace(".", "")
    )
    with open(f"{output_directory}q{QUERY_NUM}-metrics-results.txt", "w") as out:
        out.write("Precision: %s\n" % result_dict["prec"])
        out.write("Accuracy: %s\n" % result_dict["acc"])
        out.write("Confusion Matrix: \n%s\n" % cmat_str)

    if filetype == "csv":
        df.to_csv(
            f"{output_directory}q{QUERY_NUM}-results.csv", header=False, index=None
        )
    else:
        df.to_parquet(
            f"{output_directory}q{QUERY_NUM}-results.parquet", write_index=False
        )
def write_clustering_result(result_dict, output_directory="./", filetype="csv"):
    """Results are a text file AND a csv or parquet file.
    This works because we are down to a single partition dataframe.
    """
    assert filetype in ["csv", "parquet"]
    QUERY_NUM = get_query_number()

    info_path = f"{output_directory}q{QUERY_NUM}-results-cluster-info.txt"
    with open(info_path, "w") as fh:
        fh.write("Clusters:\n\n")
        fh.write(f"Number of Clusters: {result_dict.get('nclusters')}\n")
        fh.write(f"WSSSE: {result_dict.get('wssse')}\n")

        centers = result_dict.get("cluster_centers")
        if not isinstance(centers, np.ndarray):
            # dataframe-like centers: fall back to their underlying values
            centers = centers.values
        for center in centers.tolist():
            fh.write(f"{center}\n")

    # this is a single partition dataframe, with cid_labels hard coded
    # as the label column
    labels = result_dict.get("cid_labels")
    if filetype == "csv":
        labels.to_csv(
            f"{output_directory}q{QUERY_NUM}-results.csv", index=False, header=None
        )
    else:
        labels.to_parquet(
            f"{output_directory}q{QUERY_NUM}-results.parquet", write_index=False
        )
    return 0
def remove_benchmark_files():
    """
    Removes benchmark result files from cwd
    to ensure that we don't upload stale results
    """
    for fname in (
        "benchmarked_write_result.csv",
        "benchmarked_read_tables.csv",
        "benchmarked_main.csv",
    ):
        if os.path.exists(fname):
            os.remove(fname)
#################################
# Query Runner Utilities
#################################
def run_query(
    config, client, query_func, write_func=write_result, sql_context=None
):
    """
    Run a GPU-BDB query (dask-cudf or SQL flavour), optionally wrapped in
    a dask performance report when config["dask_profile"] is set.

    Fix: the previous version duplicated the SQL/dask dispatch in both
    profiling branches; the profiling context and the dispatch are now
    orthogonal (a nullcontext stands in when profiling is off).
    """
    from contextlib import nullcontext

    QUERY_NUM = get_query_number()
    if config.get("dask_profile"):
        profile_ctx = performance_report(filename=f"q{QUERY_NUM}_profile.html")
    else:
        profile_ctx = nullcontext()

    with profile_ctx:
        if sql_context:
            run_sql_query(
                config=config,
                client=client,
                query_func=query_func,
                sql_context=sql_context,
                write_func=write_func,
            )
        else:
            run_dask_cudf_query(
                config=config, client=client, query_func=query_func, write_func=write_func,
            )
def run_dask_cudf_query(config, client, query_func, write_func=write_result):
    """
    Common utility to perform all steps needed to execute a dask-cudf version
    of the query. Includes attaching to cluster, running the query and writing results

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    it now catches Exception so interrupts propagate.
    """
    try:
        remove_benchmark_files()
        config["start_time"] = time.time()
        results = benchmark(
            query_func,
            client=client,
            config=config,
        )

        benchmark(
            write_func,
            results,
            output_directory=config["output_dir"],
            filetype=config["output_filetype"],
        )
        config["query_status"] = "Success"

        result_verified = False
        if config["verify_results"]:
            result_verified = verify_results(config["verify_dir"])
        config["result_verified"] = result_verified
    except Exception:
        config["query_status"] = "Failed"
        print("Encountered Exception while running query")
        print(traceback.format_exc())

    # google sheet benchmarking automation
    push_payload_to_googlesheet(config)
def run_sql_query(
    config, client, query_func, sql_context, write_func=write_result
):
    """
    Common utility to perform all steps needed to execute a SQL version
    of the query. Includes attaching to cluster, running the query and writing results

    Fix: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    it now catches Exception so interrupts propagate.
    """
    # TODO: Unify this with dask-cudf version
    try:
        remove_benchmark_files()
        config["start_time"] = time.time()
        data_dir = config["data_dir"]
        results = benchmark(
            query_func,
            data_dir=data_dir,
            client=client,
            c=sql_context,
            config=config,
        )

        benchmark(
            write_func,
            results,
            output_directory=config["output_dir"],
            filetype=config["output_filetype"],
        )
        config["query_status"] = "Success"

        result_verified = False
        if config["verify_results"]:
            result_verified = verify_results(config["verify_dir"])
        config["result_verified"] = result_verified
    except Exception:
        config["query_status"] = "Failed"
        print("Encountered Exception while running query")
        print(traceback.format_exc())

    # google sheet benchmarking automation
    push_payload_to_googlesheet(config)
def add_empty_config(args):
    """
    Fill in missing optional configuration keys so downstream code can
    assume they exist: profiling/verification flags default to None,
    file formats default to "parquet".  Mutates and returns ``args``.
    """
    for key in (
        "get_read_time",
        "split_row_groups",
        "dask_profile",
        "verify_results",
    ):
        args.setdefault(key, None)

    args.setdefault("file_format", "parquet")
    args.setdefault("output_filetype", "parquet")
    return args
def gpubdb_argparser():
    """
    Build the query configuration: load the yaml file named on the
    command line, fill defaults, then backfill selected keys from
    environment variables when the yaml left them unset.
    """
    cli_args = get_gpubdb_argparser_commandline_args()
    with open(cli_args["config_file"]) as fp:
        args = yaml.safe_load(fp.read())
    args = add_empty_config(args)

    # Update specific core keys with environment variables
    # if yaml configuration missing
    env_fallbacks = {
        "data_dir": os.environ.get("DATA_DIRECTORY"),
        "output_dir": os.environ.get("OUTPUT_DIRECTORY", "./"),
        "sheet": os.environ.get("GOOGLE_SPREADSHEET_NAME"),
        "tab": os.environ.get("GOOGLE_SPREADSHEET_TAB"),
        "scheduler_file_path": os.environ.get("SCHEDULER_FILE"),
        "benchmark_runner_include_sql": os.environ.get("RUNNER_INCLUDE_SQL"),
    }
    for key in args.keys():
        if args.get(key) is None and key in env_fallbacks:
            args[key] = env_fallbacks[key]
    return args
def get_gpubdb_argparser_commandline_args():
    """
    Parse command line arguments -- currently only --config_file, the
    location of the benchmark configuration yaml.  Returns a plain dict.
    """
    parser = argparse.ArgumentParser(description="Run GPU-BDB query")
    print("Using default arguments")
    parser.add_argument(
        "--config_file",
        default="benchmark_runner/benchmark_config.yaml",
        type=str,
        help="Location of benchmark configuration yaml file",
    )
    return vars(parser.parse_args())
def get_scale_factor(data_dir):
    """
    Extract the integer scale factor from a data directory path that
    embeds it as 'sf<digits>' (e.g. '.../sf1000/...' -> 1000).
    """
    sf_token = re.search("sf[0-9]+", data_dir).group(0)
    # strip the leading "sf" and parse the digits
    return int(sf_token[2:])
def get_query_number():
    """This assumes a directory structure like:
    - rapids-queries
        - q01
        - q02
        ...
    and returns the current directory name with its 'q' prefix stripped.
    """
    cwd_name = os.getcwd().rsplit("/", 1)[-1]
    return cwd_name.strip("q")
#################################
# Correctness Verification
#################################
def assert_dataframes_pseudo_equal(df1, df2, significant=6):
    """Verify the pseudo-equality of two dataframes, acknowledging that:
    - Row ordering may not be consistent between files
    - Column ordering may vary between files,
    - Floating point math can be annoying, so we may need to assert
    equality at a specified level of precision
    and assuming that:
    - Files do not contain their own index values
    - Column presence does not vary between files
    - Datetime columns are read into memory consistently as either Object or Datetime columns
    """
    from cudf.testing._utils import assert_eq

    # check shape is the same
    assert df1.shape == df2.shape

    # check columns are the same
    assert sorted(df1.columns.tolist()) == sorted(df2.columns.tolist())

    # align column ordering across dataframes
    df2 = df2[df1.columns]

    # sort by every column, with the stable column ordering, then reset the index
    df1 = df1.sort_values(by=df1.columns.tolist()).reset_index(drop=True)
    df2 = df2.sort_values(by=df2.columns.tolist()).reset_index(drop=True)

    # verify equality
    # NOTE(review): check_less_precise is deprecated in newer pandas/cudf
    # testing APIs -- confirm the pinned cudf version still accepts it.
    assert_eq(df1, df2, check_less_precise=significant, check_dtype=False)
def calculate_label_overlap_percent(spark_labels, rapids_labels):
    """
    Percentage of ids assigned to the "same" cluster by Spark and RAPIDS.

    Cluster ids are arbitrary, so the labelings are aligned first: the
    i-th most common RAPIDS label is mapped onto the i-th most common
    Spark label, then the relabeled assignments are compared row by row.
    """
    assert len(spark_labels) == len(rapids_labels)
    spark_labels.columns = ["cid", "label"]
    rapids_labels.columns = ["cid", "label"]

    # assert that we clustered the same IDs
    rapids_labels = rapids_labels.reset_index(drop=True)
    assert spark_labels.cid.equals(rapids_labels.cid)

    rapids_counts_normalized = rapids_labels.label.value_counts(
        normalize=True
    ).reset_index()
    spark_counts_normalized = spark_labels.label.value_counts(
        normalize=True
    ).reset_index()

    # NOTE(review): the "index"/"label" column names below assume the
    # pre-2.0 pandas value_counts().reset_index() layout -- confirm
    # against the pinned pandas/cudf version.
    nclusters = 8  # hard-coded cluster count -- presumably matches the queries; confirm
    label_mapping = {}
    for i in range(nclusters):
        row_spark = spark_counts_normalized.iloc[i]
        row_rapids = rapids_counts_normalized.iloc[i]
        percent = row_spark["label"]
        label_id_spark = row_spark["index"]
        label_id_rapids = row_rapids["index"]
        label_mapping[label_id_rapids.astype("int")] = label_id_spark.astype("int")

    # relabel RAPIDS clusters into Spark's label space, then compare
    rapids_labels["label"] = rapids_labels["label"].replace(label_mapping)
    merged = spark_labels.merge(rapids_labels, how="inner", on=["cid"])
    overlap_percent = (merged.label_x == merged.label_y).sum() / len(merged) * 100
    return overlap_percent
def compare_clustering_cost(spark_path, rapids_path):
    """
    Compare WSSSE clustering costs parsed from the two results files
    (line 4 of each, formatted 'WSSSE: <float>').  RAPIDS is acceptable
    when its cost is lower, or within a 0.01% tolerance of Spark's.

    Returns (rapids_cost_similar, delta_percent).
    """
    def _read_wssse(path):
        # cost lives on the fourth line of the cluster-info file
        with open(path, "r") as fh:
            lines = fh.readlines()
        return float(lines[3].split(": ")[1])

    spark_wssse = _read_wssse(spark_path)
    rapids_wssse = _read_wssse(rapids_path)

    delta_percent = abs(spark_wssse - rapids_wssse) / spark_wssse * 100
    tolerance = 0.01  # allow for 1/100th of a percent cost difference
    rapids_cost_similar = (rapids_wssse <= spark_wssse) or (delta_percent <= tolerance)

    print(f"Cost delta percent: {delta_percent}")
    print(f"RAPIDS cost lower/similar: {rapids_cost_similar}")
    return rapids_cost_similar, delta_percent
def verify_clustering_query_cost(spark_path, rapids_path):
    """Assert that the RAPIDS clustering cost is lower than or similar to Spark's."""
    cost_ok, _delta = compare_clustering_cost(spark_path, rapids_path)
    assert cost_ok
def verify_clustering_query_labels(spark_data, rapids_data):
    """Report the cluster-label overlap between Spark and RAPIDS results."""
    overlap = calculate_label_overlap_percent(spark_data, rapids_data)
    print(f"Label overlap percent: {overlap}")
    return 0
def compare_supervised_metrics(validation, results):
    """
    Compare precision/AUC metrics parsed from two metrics files (lists of
    lines: 'Precision: <v>' then 'AUC: <v>').  A metric passes when the
    results value is at least the validation value, or within a 0.01%
    tolerance of it.

    Returns (precision_similar, auc_similar, precision_delta_percent).

    Fixes two defects: the AUC delta was divided by the validation
    *precision* instead of the validation AUC, and the 'Precision' log
    line was misspelled ('Precisiom').
    """
    val_precision = float(validation[0].split(": ")[1])
    val_auc = float(validation[1].split(": ")[1])

    results_precision = float(results[0].split(": ")[1])
    results_auc = float(results[1].split(": ")[1])

    tolerance = 0.01  # allow for 1/100th of a percent difference

    precision_delta_percent = (
        abs(val_precision - results_precision) / val_precision * 100
    )
    precision_similar = (results_precision >= val_precision) or (
        precision_delta_percent <= tolerance
    )

    # BUGFIX: normalize the AUC delta by the validation AUC (was val_precision)
    auc_delta_percent = abs(val_auc - results_auc) / val_auc * 100
    auc_similar = (results_auc >= val_auc) or (auc_delta_percent <= tolerance)

    print(f"Precision delta percent: {precision_delta_percent}")
    print(f"AUC delta percent: {auc_delta_percent}")
    print(f"Precision higher/similar: {precision_similar}")
    print(f"AUC higher/similar: {auc_similar}")

    return precision_similar, auc_similar, precision_delta_percent
def verify_supervised_metrics(validation, results):
    """Assert both precision and AUC are at least as good as validation."""
    precision_ok, auc_ok, _delta = compare_supervised_metrics(validation, results)
    assert precision_ok and auc_ok
def verify_sentiment_query(results, validation, query_number, threshold=90):
    """
    Fuzzy-match sentiment query output against validation data: group
    both frames by the sentiment key columns, count groups appearing in
    both, and require the overlap percentage (with either side's row
    count as denominator) to reach ``threshold``.
    """
    if query_number == "18":
        group_cols = ["s_name", "r_date", "sentiment", "sentiment_word"]
    else:
        group_cols = ["item_sk", "sentiment", "sentiment_word"]

    rapids_grouped = results.groupby(group_cols).size().reset_index()
    spark_grouped = validation.groupby(group_cols).size().reset_index()

    rapids_nrows = rapids_grouped.shape[0]
    spark_nrows = spark_grouped.shape[0]
    common_rows = rapids_grouped.merge(
        spark_grouped, how="inner", on=list(rapids_grouped.columns)
    ).shape[0]

    overlap_percent_rapids_denom = common_rows / rapids_nrows * 100
    overlap_percent_spark_denom = common_rows / spark_nrows * 100
    print(
        f"{overlap_percent_rapids_denom}% overlap with {rapids_nrows} rows (RAPIDS denominator)"
    )
    print(
        f"{overlap_percent_spark_denom}% overlap with {spark_nrows} rows (Spark denominator)"
    )
    assert overlap_percent_rapids_denom >= threshold
    assert overlap_percent_spark_denom >= threshold
    return 0
def verify_results(verify_dir):
    """
    Verify RAPIDS query results against Spark validation output.

    verify_dir: Directory which contains verification results

    Dispatches on the query number to the appropriate comparison strategy
    (supervised-learning metrics, clustering cost/labels, sentiment overlap,
    scalar near-equality, or generic dataframe equality) and returns True
    when the correctness assertion passes, False otherwise.
    """
    import cudf
    import cupy as cp
    import dask.dataframe as dd

    QUERY_NUM = get_query_number()

    # Query groupings
    SENTIMENT_QUERIES = (
        "10",
        "18",
        "19",
    )
    CLUSTERING_QUERIES = (
        "20",
        "25",
        "26",
    )
    SUPERVISED_LEARNING_QUERIES = (
        "05",
        "28",
    )

    # Key Thresholds
    SENTIMENT_THRESHOLD = 90

    result_verified = False

    # Short-circuit for the NER query.
    # FIX: ("27") is a plain string, so `in` performed a substring match;
    # a one-element tuple gives exact membership.
    if QUERY_NUM in ("27",):
        print("Did not run Correctness check for this query")
        return result_verified

    # Setup validation data
    if QUERY_NUM in SUPERVISED_LEARNING_QUERIES:
        verify_fname = os.path.join(
            verify_dir, f"q{QUERY_NUM}-results/q{QUERY_NUM}-metrics-results.txt"
        )
        result_fname = f"q{QUERY_NUM}-metrics-results.txt"
        with open(verify_fname, "r") as fh:
            validation_data = fh.readlines()
    else:
        result_fname = f"q{QUERY_NUM}-results.parquet/"
        # FIX: use os.path.join (consistent with the rest of this function)
        # so a verify_dir without a trailing separator still works.
        verify_fname = glob.glob(
            os.path.join(verify_dir, f"q{QUERY_NUM}-results", "*.csv")
        )
        validation_data = dd.read_csv(verify_fname, escapechar="\\").compute()

    # Setup results data
    # special case q12 due to the parquet output, which seems to be causing problems
    # for the reader
    if QUERY_NUM in ("12",):
        results_data = dask_cudf.read_parquet(result_fname + "*.parquet").compute()
        results_data = results_data.to_pandas()
    elif QUERY_NUM in SUPERVISED_LEARNING_QUERIES:
        with open(result_fname, "r") as fh:
            results_data = fh.readlines()
    else:
        results_data = dask_cudf.read_parquet(result_fname).compute()
        results_data = results_data.to_pandas()

    # Verify correctness
    if QUERY_NUM in SUPERVISED_LEARNING_QUERIES:
        print("Supervised Learning Query")
        try:
            verify_supervised_metrics(validation_data, results_data)
            result_verified = True
            print("Correctness Assertion True")
        except AssertionError as error:
            print("Error", error)
            print("Correctness Assertion False")
    elif QUERY_NUM in CLUSTERING_QUERIES:
        print("Clustering Query")
        try:
            cluster_info_validation_path = os.path.join(
                verify_dir, f"q{QUERY_NUM}-results/clustering-results.txt"
            )
            cluster_info_rapids_path = f"q{QUERY_NUM}-results-cluster-info.txt"
            # primary metric
            verify_clustering_query_cost(
                cluster_info_validation_path, cluster_info_rapids_path
            )
            # secondary metric (non-binding)
            verify_clustering_query_labels(validation_data, results_data)
            result_verified = True
            print("Correctness Assertion True")
        except AssertionError as error:
            print("Error", error)
            print("Correctness Assertion False")
    elif QUERY_NUM in SENTIMENT_QUERIES:
        print("Sentiment Analysis Query")
        try:
            verify_sentiment_query(
                results_data, validation_data, QUERY_NUM, threshold=SENTIMENT_THRESHOLD
            )
            result_verified = True
            print("Correctness Assertion True")
        except AssertionError as error:
            print("Error", error)
            print("Correctness Assertion False")
    # scalar results
    elif QUERY_NUM in ("04", "23"):
        print("Scalar Result Query")
        try:
            np.testing.assert_array_almost_equal(
                validation_data.values, results_data.values, decimal=5
            )
            result_verified = True
            print("Correctness Assertion True")
        except AssertionError as error:
            print("Error", error)
            print("Correctness Assertion False")
    else:
        print("Standard ETL Query")
        try:
            assert_dataframes_pseudo_equal(results_data, validation_data)
            result_verified = True
            print("Correctness Assertion True")
        except AssertionError as error:
            print("Error", error)
            print("Correctness Assertion False")
    return result_verified
#################################
# Performance Tracking Automation
#################################
def build_benchmark_googlesheet_payload(config):
    """
    Build one row of benchmark metadata for the Google Sheets tracker.

    config : dict
        Run configuration; read for "start_time", "get_read_time",
        "file_format", "data_dir", "run_id", "query_status", and the
        nvlink/infiniband flags.

    Returns the payload as a plain list of values ordered to match the
    sheet's columns.
    """
    # Don't mutate original dictionary
    data = config.copy()

    # get the hostname of the machine running this workload
    data["hostname"] = socket.gethostname()
    QUERY_NUM = get_query_number()
    current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Each timing is read from a CSV emitted by the `benchmark` decorator;
    # None means the corresponding file was not produced for this run.
    query_time = _get_benchmarked_method_time(
        filename="benchmarked_main.csv", query_start_time=config.get("start_time")
    )
    writing_time = _get_benchmarked_method_time(
        filename="benchmarked_write_result.csv",
        query_start_time=config.get("start_time"),
    )
    read_graph_creation_time = _get_benchmarked_method_time(
        filename="benchmarked_read_tables.csv",
        query_start_time=config.get("start_time"),
    )
    if data["get_read_time"] and read_graph_creation_time and query_time:
        ### below contains the computation time
        compute_read_table_time = _get_benchmarked_method_time(
            filename="benchmarked_read_tables.csv",
            field="compute_time_seconds",
            query_start_time=config.get("start_time"),
        )
        # subtracting read calculation time
        query_time = query_time - compute_read_table_time
    else:
        compute_read_table_time = None

    # get library info
    library_info = generate_library_information()
    data.update(library_info)

    payload = OrderedDict(
        {
            "Query Number": QUERY_NUM,
            "Protocol": "UCX" if data.get("nvlink") == True else "TCP",
            "NVLINK": data.get("nvlink", "NA"),
            "Infiniband": data.get("infiniband", "NA"),
            "Query Type": "sql" if is_sql_query() else "dask",
            "File Format": data.get("file_format"),
            "Time (seconds)": query_time + writing_time
            if query_time and writing_time
            else "NA",
            "Query Time (seconds)": query_time if query_time else "NA",
            "Writing Results Time": writing_time if writing_time else "NA",
            # read time
            "Compute Read + Repartition small table Time(seconds)": compute_read_table_time
            if compute_read_table_time
            else "NA",
            "Graph Creation time(seconds)": read_graph_creation_time
            if read_graph_creation_time
            else "NA",
            "Hostname": data.get("hostname"),
            "RMM Pool Size": os.environ.get("POOL_SIZE"),
            "Device Memory Limit": os.environ.get("DEVICE_MEMORY_LIMIT"),
            "Number of GPUs": os.environ.get("NUM_WORKERS"),
            "Data Location": data.get("data_dir"),
            "Current Time": current_time,
            "cuDF Version": data.get("cudf"),
            "Dask SQL Version": data.get("sql"),
            "Dask Version": data.get("dask"),
            "Distributed Version": data.get("distributed"),
            "Dask-CUDA Version": data.get("dask-cuda"),
            "UCX-py Version": data.get("ucx-py"),
            "UCX Version": data.get("ucx"),
            "RMM Version": data.get("rmm"),
            "cuML Version": data.get("cuml"),
            "CuPy Version": data.get("cupy"),
            "Query Status": data.get("query_status", "Unknown"),
            "Unique Run ID": data.get("run_id"),
        }
    )
    # Sheet rows are appended as bare value lists, so drop the keys.
    payload = list(payload.values())
    return payload
def is_sql_query():
    """
    Method that returns true if caller of the utility is a SQL query, returns false otherwise
    Assumes that caller is 3 levels above the stack
    query_of_interest -> utils.push_to_google_sheet -> utils.build_payload -> utils.is_sql_query
    Another potential solution is checking sys.modules.get("dask_sql") to check Dask-SQL is imported
    """
    # Third frame from the bottom of the stack is assumed to be the query entry point.
    caller_frame = inspect.stack()[-3]
    return "sql" in caller_frame.function
def _get_benchmarked_method_time(
    filename, field="elapsed_time_seconds", query_start_time=None
):
    """
    Read one timing field from a CSV produced by the `benchmark` decorator.

    Returns the first value of ``field`` in ``filename``, or None when the
    benchmark file does not exist.
    """
    import cudf

    try:
        timing_frame = cudf.read_csv(filename)
        return timing_frame[field].iloc[0]
    except FileNotFoundError:
        return None
def generate_library_information():
    """Return a dict mapping key RAPIDS/Dask library names to their installed
    versions, obtained by shelling out to ``conda list`` in the active
    environment and parsing its tabular output.
    """
    KEY_LIBRARIES = [
        "cudf",
        "cuml",
        "dask",
        "distributed",
        "ucx",
        "ucx-py",
        "dask-cuda",
        "rmm",
        "cupy",
        "dask-sql",
    ]

    # NOTE(review): assumes CONDA_PREFIX is set and contains "envs" —
    # os.environ.get(...) returns None when unset, which would raise here.
    conda_list_command = (
        os.environ.get("CONDA_PREFIX").partition("envs")[0] + "bin/conda list"
    )
    result = subprocess.run(
        conda_list_command, stdout=subprocess.PIPE, shell=True
    ).stdout.decode("utf-8")

    # The first three lines of `conda list` output are headers; the rest are
    # whitespace-separated rows of (library, version, build, channel).
    df = pd.DataFrame(
        [x.split() for x in result.split("\n")[3:]],
        columns=["library", "version", "build", "channel"],
    )
    df = df[df.library.isin(KEY_LIBRARIES)]
    lib_dict = dict(zip(df.library, df.version))
    return lib_dict
def push_payload_to_googlesheet(config):
    """Append one benchmark row to the configured Google Sheet.

    Does nothing unless the GOOGLE_SHEETS_CREDENTIALS_PATH environment
    variable is set; returns 1 when the "sheet"/"tab" config entries are
    missing.
    """
    if os.environ.get("GOOGLE_SHEETS_CREDENTIALS_PATH", None):
        if not config.get("tab") or not config.get("sheet"):
            print("Must pass a sheet and tab name to use Google Sheets automation")
            return 1
        scope = [
            "https://spreadsheets.google.com/feeds",
            "https://www.googleapis.com/auth/drive",
        ]
        credentials_path = os.environ["GOOGLE_SHEETS_CREDENTIALS_PATH"]
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            credentials_path, scope
        )
        gc = gspread.authorize(credentials)
        payload = build_benchmark_googlesheet_payload(config)
        s = gc.open(config["sheet"])
        tab = s.worksheet(config["tab"])
        # USER_ENTERED lets Sheets parse numbers/dates; anchoring at A2
        # keeps the header row intact.
        tab.append_row(payload, value_input_option='USER_ENTERED', table_range='A2')
#################################
# Query Utilities
#################################
def left_semi_join(df_1, df_2, left_on, right_on):
    """
    Perform a left semi join between two dataframes.

    Rows of ``df_1`` are kept when they match at least one row of ``df_2``
    on the given keys; no columns from ``df_2`` are added (cudf's
    "leftsemi" merge). ``df_2`` must be a single partition, which is
    broadcast against every partition of ``df_1``.
    """
    left_merge = lambda df_1, df_2: df_1.merge(
        df_2, left_on=left_on, right_on=right_on, how="leftsemi"
    )
    ## asserting that number of partitions of the right frame is always 1
    assert df_2.npartitions == 1
    # meta=df_1._meta: output schema equals the left frame's (semi join adds no columns)
    return df_1.map_partitions(left_merge, df_2.to_delayed()[0], meta=df_1._meta)
def convert_datestring_to_days(df):
    """Replace the "d_date" column ("%Y-%m-%d" strings) with int64 days
    since the Unix epoch, in place, and return the dataframe.
    """
    import cudf

    # seconds since epoch / 86400 -> fractional days, then truncate to int64
    df["d_date"] = (
        cudf.to_datetime(df["d_date"], format="%Y-%m-%d")
        .astype("datetime64[s]")
        .astype("int64")
        / 86400
    )
    df["d_date"] = df["d_date"].astype("int64")
    return df
def train_clustering_model(training_df, n_clusters, max_iter, n_init):
    """Trains a KMeans clustering model on the
    given dataframe and returns the resulting
    labels and WSSSE (within-cluster sum of squared errors, i.e. inertia)
    together with the cluster centers and cluster count.
    """
    from cuml.cluster.kmeans import KMeans as cuKMeans
    from sklearn.cluster import KMeans

    # Optimizing by doing multiple seeding iterations.
    # pandas input -> CPU scikit-learn; anything else -> GPU cuML KMeans.
    if isinstance(training_df, pd.DataFrame):
        model = KMeans(
            n_clusters=n_clusters,
            max_iter=max_iter,
            random_state=np.random.randint(0, 500),
            init="k-means++",
            n_init=n_init,
        )
    else:
        model = cuKMeans(
            oversampling_factor=0,
            n_clusters=n_clusters,
            max_iter=max_iter,
            random_state=np.random.randint(0, 500),
            init="k-means++",
            n_init=n_init,
        )
    model.fit(training_df)
    return {
        "cid_labels": model.labels_,
        "wssse": model.inertia_,
        "cluster_centers": model.cluster_centers_,
        "nclusters": n_clusters,
    }
def get_clusters(client, kmeans_input_df):
    """Run KMeans on the cluster and return the training-result dict with
    per-row cluster labels joined back onto the input's index.

    NOTE(review): one delayed training task is created per partition of
    ``kmeans_input_df``, but only a single results dict is consumed —
    presumably the frame is expected to have one partition; confirm with callers.
    """
    N_CLUSTERS = 8
    CLUSTER_ITERATIONS = 20
    N_ITER = 5
    ml_tasks = [
        delayed(train_clustering_model)(df, N_CLUSTERS, CLUSTER_ITERATIONS, N_ITER)
        for df in kmeans_input_df.to_delayed()
    ]
    results_dict = client.compute(*ml_tasks, sync=True)

    # Materialize the input index as a frame so labels can be attached to it.
    output = kmeans_input_df.index.to_frame().reset_index(drop=True)

    # Wrap the raw label array in a dask frame matching the output's partitioning.
    if isinstance(kmeans_input_df, dask_cudf.DataFrame):
        labels_final = dask_cudf.from_cudf(
            results_dict["cid_labels"], npartitions=output.npartitions
        )
    else:
        labels_final = dd.from_pandas(
            pd.DataFrame(results_dict["cid_labels"]), npartitions=output.npartitions
        )
    output["label"] = labels_final.reset_index()[0]
    results_dict["cid_labels"] = output
    return results_dict
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q25_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Query 25 date parameter (consumed by the q25 query definition)
q25_date = "2002-01-02"
def read_tables(config, c=None):
    """Read the q25 input tables (store_sales, web_sales, date_dim).

    Each frame is trimmed to the columns the query needs. When a Dask-SQL
    context ``c`` is supplied, the frames are also registered as SQL tables.
    Returns (store_sales, web_sales, date_dim).
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    store_sales_cols = [
        "ss_customer_sk",
        "ss_sold_date_sk",
        "ss_ticket_number",
        "ss_net_paid",
    ]
    web_sales_cols = [
        "ws_bill_customer_sk",
        "ws_sold_date_sk",
        "ws_order_number",
        "ws_net_paid",
    ]
    date_dim_cols = ["d_date_sk", "d_date"]

    ss_ddf = reader.read("store_sales", relevant_cols=store_sales_cols, index=False)
    ws_ddf = reader.read("web_sales", relevant_cols=web_sales_cols, index=False)
    datedim_ddf = reader.read("date_dim", relevant_cols=date_dim_cols, index=False)

    if c:
        c.create_table("web_sales", ws_ddf, persist=False)
        c.create_table("store_sales", ss_ddf, persist=False)
        c.create_table("date_dim", datedim_ddf, persist=False)

    return ss_ddf, ws_ddf, datedim_ddf
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q12_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Query 12 category filter, pre-quoted for direct use in a SQL IN (...) clause
q12_i_category_IN = "'Books', 'Electronics'"
# Column subsets read for each q12 input table
item_cols = ["i_item_sk", "i_category"]
store_sales_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_customer_sk"]
wcs_cols = ["wcs_user_sk", "wcs_click_date_sk", "wcs_item_sk", "wcs_sales_sk"]
def read_tables(config, c=None):
    """Read the q12 input tables.

    Returns only (item, store_sales). web_clickstreams is read and — when a
    Dask-SQL context ``c`` is supplied — registered as a SQL table, but it
    is not part of the return value (callers access it via the context).
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    items = reader.read("item", relevant_cols=item_cols)
    store_sales = reader.read("store_sales", relevant_cols=store_sales_cols)
    clickstreams = reader.read("web_clickstreams", relevant_cols=wcs_cols)

    if c:
        c.create_table("web_clickstreams", clickstreams, persist=False)
        c.create_table("store_sales", store_sales, persist=False)
        c.create_table("item", items, persist=False)

    return items, store_sales
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q19_utils.py |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# Query 19 parameter: return dates used in the query's IN filter
q19_returns_dates_IN = ["2004-03-08", "2004-08-02", "2004-11-15", "2004-12-20"]
# End-of-line marker for review-text processing — presumably consumed by the
# sentence-splitting utilities in bdb_tools.text; confirm before changing.
eol_char = "è"
def read_tables(config, c=None):
    """Read the q19 input tables.

    Returns (date_dim, store_returns, web_returns, product_reviews), each
    trimmed to the columns the query needs. product_reviews is read with
    split_row_groups=True for better parallelism. When a Dask-SQL context
    ``c`` is supplied, all four frames are also registered as SQL tables.
    """
    reader = build_reader(
        data_format=config["file_format"], basepath=config["data_dir"], backend=config["backend"],
    )

    date_dim_df = reader.read(
        "date_dim", relevant_cols=["d_week_seq", "d_date_sk", "d_date"]
    )
    store_returns_df = reader.read(
        "store_returns",
        relevant_cols=["sr_returned_date_sk", "sr_item_sk", "sr_return_quantity"],
    )
    web_returns_df = reader.read(
        "web_returns",
        relevant_cols=["wr_returned_date_sk", "wr_item_sk", "wr_return_quantity"],
    )

    ### splitting by row groups for better parallelism
    review_reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=True,
        backend=config["backend"],
    )
    product_reviews_df = review_reader.read(
        "product_reviews",
        relevant_cols=["pr_item_sk", "pr_review_content", "pr_review_sk"],
    )

    if c:
        c.create_table('web_returns', web_returns_df, persist=False)
        c.create_table('date_dim', date_dim_df, persist=False)
        c.create_table('product_reviews', product_reviews_df, persist=False)
        c.create_table('store_returns', store_returns_df, persist=False)

    return date_dim_df, store_returns_df, web_returns_df, product_reviews_df
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/cluster_startup.py | #
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import requests
import sys
import importlib
import dask
from dask.distributed import Client
from dask.utils import parse_bytes
def attach_to_cluster(config, create_sql_context=False):
    """Attaches to an existing cluster if available.

    By default, tries to attach to a cluster running on localhost:8786 (dask's default).
    This is currently hardcoded to assume the dashboard is running on port 8787.

    Optionally, this will also create a Dask-SQL Context.

    Returns (client, c): the connected dask Client and the Dask-SQL Context
    (None unless ``create_sql_context`` is True).
    """
    scheduler_file = config.get("scheduler_file_path")
    host = config.get("cluster_host")
    port = config.get("cluster_port", "8786")
    start_local_cluster = config.get("start_local_cluster", False)

    if start_local_cluster:
        from dask_cuda import LocalCUDACluster

        # Cluster size, memory limits and protocol are driven by environment
        # variables so the same code path works across machines.
        cluster = LocalCUDACluster(
            n_workers=int(os.environ.get("NUM_WORKERS", 16)),
            device_memory_limit=os.environ.get("DEVICE_MEMORY_LIMIT", "20GB"),
            local_directory=os.environ.get("LOCAL_DIRECTORY"),
            rmm_pool_size=os.environ.get("POOL_SIZE", "29GB"),
            memory_limit=os.environ.get("DEVICE_MEMORY_LIMIT", "1546828M"),
            enable_tcp_over_ucx=os.environ.get("CLUSTER_MODE", "TCP")=="NVLINK",
            enable_nvlink=os.environ.get("CLUSTER_MODE", "TCP")=="NVLINK",
            protocol="ucx" if os.environ.get("CLUSTER_MODE", "TCP")=="NVLINK" else "tcp",
            enable_infiniband=False,
            enable_rdmacm=False,
            jit_unspill=True
        )
        client = Client(cluster)
    elif scheduler_file is not None:
        try:
            with open(scheduler_file) as fp:
                print(fp.read())
            client = Client(scheduler_file=scheduler_file)
            print('Connected!')
        except OSError as e:
            sys.exit(f"Unable to create a Dask Client connection: {e}")
    elif host is not None:
        try:
            # Scrape the scheduler address out of the dashboard HTML
            # (dashboard assumed on port 8787).
            content = requests.get(
                "http://" + host + ":8787/info/main/workers.html"
            ).content.decode("utf-8")
            url = content.split("Scheduler ")[1].split(":" + str(port))[0]
            client = Client(address=f"{url}:{port}")
            print(f"Connected to {url}:{port}")
            config["protocol"] = str(url)[0:3]
        except requests.exceptions.ConnectionError as e:
            sys.exit(
                f"Unable to connect to existing dask scheduler dashboard to determine cluster type: {e}"
            )
        except OSError as e:
            sys.exit(f"Unable to create a Dask Client connection: {e}")
    else:
        raise ValueError("Must pass a scheduler file or cluster address to the host argument.")

    def maybe_create_worker_directories(dask_worker):
        # Runs on each worker: make sure its local scratch directory exists.
        worker_dir = dask_worker.local_directory
        if not os.path.exists(worker_dir):
            os.mkdir(worker_dir)
    client.run(maybe_create_worker_directories)

    # Get ucx config variables
    ucx_config = client.submit(_get_ucx_config).result()
    config.update(ucx_config)

    # CuPy should use RMM on all worker and client processes
    import cupy as cp
    import rmm
    cp.cuda.set_allocator(rmm.rmm_cupy_allocator)
    client.run(cp.cuda.set_allocator, rmm.rmm_cupy_allocator)

    # Save worker information
    # Assumes all GPUs are the same size
    expected_workers = int(os.environ.get("NUM_WORKERS", 16))
    worker_counts = worker_count_info(client)
    # FIX: current_workers was previously unbound when every count was zero
    # (no workers joined yet), which raised NameError instead of reaching the
    # mismatch message below.
    current_workers = 0
    for gpu_size, count in worker_counts.items():
        if count != 0:
            current_workers = worker_counts.pop(gpu_size)
            break

    if expected_workers is not None and expected_workers != current_workers:
        print(
            f"Expected {expected_workers} {gpu_size} workers in your cluster, but got {current_workers}. It can take a moment for all workers to join the cluster. You may also have misconfigured hosts."
        )
        sys.exit(-1)

    config["16GB_workers"] = worker_counts.get("16GB", 0)
    config["32GB_workers"] = worker_counts.get("32GB", 0)
    config["40GB_workers"] = worker_counts.get("40GB", 0)
    config["80GB_workers"] = worker_counts.get("80GB", 0)

    c = None
    if create_sql_context:
        from dask_sql import Context
        c = Context()

    return client, c
def worker_count_info(client):
    """
    Return a dict mapping each known GPU size ("16GB", "32GB", "40GB",
    "80GB") to the number of connected workers whose total device memory is
    within a fixed tolerance of that size.

    Assumes all GPUs are of the same type.
    """
    gpu_sizes = ["16GB", "32GB", "40GB", "80GB"]
    counts_by_gpu_size = dict.fromkeys(gpu_sizes, 0)
    tolerance = "6.3GB"
    max_delta = parse_bytes(tolerance)

    for info in client.scheduler_info()["workers"].values():
        total_mem = info["gpu"]["memory-total"]
        # Bin the worker into the first size within tolerance, if any.
        matched = next(
            (s for s in gpu_sizes if abs(parse_bytes(s) - total_mem) < max_delta),
            None,
        )
        if matched is not None:
            counts_by_gpu_size[matched] += 1

    return counts_by_gpu_size
def _get_ucx_config():
    """
    Return the benchmark-relevant subset of the distributed UCX settings
    ("infiniband" and "nvlink"), with unset/None values normalized to False.
    """
    ucx_config = dask.config.get("distributed.comm.ucx")
    # Disabled options come back as None rather than False; coerce here.
    return {
        key: ucx_config.get(key) or False
        for key in ("infiniband", "nvlink")
    }
def import_query_libs():
    """Eagerly import the heavy libraries shared across queries so the
    import cost is paid once up front."""
    for lib in (
        "rmm",
        "cudf",
        "cuml",
        "cupy",
        "sklearn",
        "dask_cudf",
        "pandas",
        "numpy",
        "spacy",
    ):
        importlib.import_module(lib)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q09_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# -------- Q9 -----------
# Query 9 parameters: three parameter groups (part1/part2/part3), each
# constraining address, net profit, demographics and sales price
# (exact combination defined by the q09 query itself).
q09_year = 2001

# Parameter group 1
q09_part1_ca_country = "United States"
q09_part1_ca_state_IN = "KY", "GA", "NM"
q09_part1_net_profit_min = 0
q09_part1_net_profit_max = 2000
q09_part1_education_status = "4 yr Degree"
q09_part1_marital_status = "M"
q09_part1_sales_price_min = 100
q09_part1_sales_price_max = 150

# Parameter group 2
q09_part2_ca_country = "United States"
q09_part2_ca_state_IN = "MT", "OR", "IN"
q09_part2_net_profit_min = 150
q09_part2_net_profit_max = 3000
q09_part2_education_status = "4 yr Degree"
q09_part2_marital_status = "M"
q09_part2_sales_price_min = 50
q09_part2_sales_price_max = 200

# Parameter group 3
q09_part3_ca_country = "United States"
q09_part3_ca_state_IN = "WI", "MO", "WV"
q09_part3_net_profit_min = 50
q09_part3_net_profit_max = 25000
q09_part3_education_status = "4 yr Degree"
q09_part3_marital_status = "M"
q09_part3_sales_price_min = 150
q09_part3_sales_price_max = 200
def read_tables(config, c=None):
    """Read the q09 input tables.

    Returns (store_sales, customer_address, customer_demographics,
    date_dim, store), each trimmed to the columns the query needs; when a
    Dask-SQL context ``c`` is given, they are also registered as SQL tables.
    """
    table_reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )
    ss_columns = [
        "ss_quantity",
        "ss_sold_date_sk",
        "ss_addr_sk",
        "ss_store_sk",
        "ss_cdemo_sk",
        "ss_sales_price",
        "ss_net_profit",
    ]
    store_sales = table_reader.read("store_sales", relevant_cols=ss_columns)
    ca_columns = ["ca_address_sk", "ca_country", "ca_state"]
    customer_address = table_reader.read("customer_address", relevant_cols=ca_columns)
    cd_columns = ["cd_demo_sk", "cd_marital_status", "cd_education_status"]
    customer_demographics = table_reader.read(
        "customer_demographics", relevant_cols=cd_columns
    )
    dd_columns = ["d_year", "d_date_sk"]
    date_dim = table_reader.read("date_dim", relevant_cols=dd_columns)
    s_columns = ["s_store_sk"]
    store = table_reader.read("store", relevant_cols=s_columns)
    if c:
        c.create_table("store_sales", store_sales, persist=False)
        c.create_table("customer_address", customer_address, persist=False)
        c.create_table("customer_demographics", customer_demographics, persist=False)
        c.create_table("date_dim", date_dim, persist=False)
        c.create_table("store", store, persist=False)
    return (store_sales, customer_address, customer_demographics, date_dim, store)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/bdb_tools/q06_utils.py | #
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
# -------- Q6 -----------
# Query 6 parameters (consumed by the q06 query definition)
q06_LIMIT = 100
# --web_sales and store_sales date
q06_YEAR = 2001
def read_tables(config, c=None):
    """Read the q06 input tables (web_sales, store_sales, date_dim, customer).

    Each frame is trimmed to the columns the query needs. When a Dask-SQL
    context ``c`` is supplied, the frames are also registered as SQL tables.
    Returns (web_sales, store_sales, date_dim, customer).
    """
    reader = build_reader(
        data_format=config["file_format"],
        basepath=config["data_dir"],
        split_row_groups=config["split_row_groups"],
        backend=config["backend"],
    )

    ws_cols = [
        "ws_bill_customer_sk",
        "ws_sold_date_sk",
        "ws_ext_list_price",
        "ws_ext_wholesale_cost",
        "ws_ext_discount_amt",
        "ws_ext_sales_price",
    ]
    ss_cols = [
        "ss_customer_sk",
        "ss_sold_date_sk",
        "ss_ext_list_price",
        "ss_ext_wholesale_cost",
        "ss_ext_discount_amt",
        "ss_ext_sales_price",
    ]
    dd_cols = ["d_date_sk", "d_year", "d_moy"]
    cust_cols = [
        "c_customer_sk",
        "c_customer_id",
        "c_email_address",
        "c_first_name",
        "c_last_name",
        "c_preferred_cust_flag",
        "c_birth_country",
        "c_login",
    ]

    ws_df = reader.read("web_sales", relevant_cols=ws_cols)
    ss_df = reader.read("store_sales", relevant_cols=ss_cols)
    date_df = reader.read("date_dim", relevant_cols=dd_cols)
    customer_df = reader.read("customer", relevant_cols=cust_cols)

    if c:
        c.create_table('web_sales', ws_df, persist=False)
        c.create_table('store_sales', ss_df, persist=False)
        c.create_table('date_dim', date_df, persist=False)
        c.create_table('customer', customer_df, persist=False)

    return (ws_df, ss_df, date_df, customer_df)
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/cluster_configuration/cluster-startup.sh | #!/bin/bash
#########################################################
### Configuration to (possibly) tweak                 ###
#########################################################

USERNAME=$(whoami)

# Scheduler configuration for your machine or cluster
SCHEDULER_PORT=${SCHEDULER_PORT:-8786}
DASHBOARD_ADDRESS=${DASHBOARD_ADDRESS:-8787}
INTERFACE=${INTERFACE:-ib0}

# Logging and scratch space for your machine or cluster
LOCAL_DIRECTORY=${LOCAL_DIRECTORY:-/raid/$USERNAME/dask-local-directory}
SCHEDULER_FILE=${SCHEDULER_FILE:-$LOCAL_DIRECTORY/scheduler.json}
LOGDIR=${LOGDIR:-$LOCAL_DIRECTORY/logs}
WORKER_DIR=${WORKER_DIR:-/raid/$USERNAME/gpu-bdb-dask-workers/}

# Communication protocol: TCP or NVLINK (NVLINK uses UCX)
CLUSTER_MODE=${CLUSTER_MODE:-TCP}

# Cluster memory configuration
MAX_SYSTEM_MEMORY=$(free -m | awk '/^Mem:/{print $2}')M
DEVICE_MEMORY_LIMIT=${DEVICE_MEMORY_LIMIT:-20GB}
POOL_SIZE=${POOL_SIZE:-29GB}

# Conda environment information
CONDA_ENV_NAME=${CONDA_ENV_NAME:-rapids-gpu-bdb}
CONDA_ENV_PATH=${CONDA_ENV_PATH:-/raid/$USERNAME/miniconda3/etc/profile.d/conda.sh}

# Repository home
GPU_BDB_HOME=${GPU_BDB_HOME:-/raid/$USERNAME/prod/gpu-bdb}

# Dask-cuda optional configuration
export DASK_JIT_UNSPILL=${DASK_JIT_UNSPILL:-True}
export DASK_EXPLICIT_COMMS=${DASK_EXPLICIT_COMMS:-False}

#########################################################
### Configuration to (generally) leave as default     ###
#########################################################

# First CLI argument: "SCHEDULER" starts the scheduler (and a worker),
# any other value starts workers only.
ROLE=$1

# Dask/distributed configuration
export DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT=${DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT:-100s}
export DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP=${DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP:-600s}
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN=${DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN:-1s}
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX=${DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX:-60s}

# Purge Dask worker and log directories (scheduler node only)
if [ "$ROLE" = "SCHEDULER" ]; then
rm -rf $LOGDIR/*
mkdir -p $LOGDIR
rm -rf $WORKER_DIR/*
mkdir -p $WORKER_DIR
fi

# Purge Dask config directories
rm -rf ~/.config/dask

#########################################################
### Launch the cluster                                ###
#########################################################

# Activate conda environment and install the local library
source $CONDA_ENV_PATH
conda activate $CONDA_ENV_NAME
cd $GPU_BDB_HOME/gpu_bdb
python -m pip install .

# Scheduler runs on GPU 0 of the scheduler node; logs go to $LOGDIR.
if [ "$ROLE" = "SCHEDULER" ]; then
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
echo "Starting UCX scheduler.."
CUDA_VISIBLE_DEVICES='0' DASK_UCX__CUDA_COPY=True DASK_UCX__TCP=True DASK_UCX__NVLINK=True DASK_UCX__INFINIBAND=False DASK_UCX__RDMACM=False nohup dask-scheduler --dashboard-address $DASHBOARD_ADDRESS --port $SCHEDULER_PORT --interface $INTERFACE --protocol ucx --scheduler-file $SCHEDULER_FILE > $LOGDIR/scheduler.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
echo "Starting TCP scheduler.."
CUDA_VISIBLE_DEVICES='0' nohup dask-scheduler --dashboard-address $DASHBOARD_ADDRESS --port $SCHEDULER_PORT --interface $INTERFACE --protocol tcp --scheduler-file $SCHEDULER_FILE > $LOGDIR/scheduler.log 2>&1 &
fi
fi

# Setup workers
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
echo "Starting workers.."
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --rmm-pool-size $POOL_SIZE --memory-limit $MAX_SYSTEM_MEMORY --enable-tcp-over-ucx --enable-nvlink --disable-infiniband --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
echo "Starting workers.."
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --rmm-pool-size $POOL_SIZE --memory-limit=$MAX_SYSTEM_MEMORY --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/cluster_configuration/bsql-cluster-startup.sh | #IB, NVLINK, or TCP
# First CLI argument selects the communication protocol: IB, NVLINK, or TCP
CLUSTER_MODE=$1

USERNAME=$(whoami)

# Cluster memory configuration
MAX_SYSTEM_MEMORY=$(free -m | awk '/^Mem:/{print $2}')M
DEVICE_MEMORY_LIMIT="25GB"
POOL_SIZE="30GB"

# Fill in your environment name and conda path on each node
GPU_BDB_HOME="/home/$USERNAME/shared/gpu-bdb"
CONDA_ENV_NAME="rapids-gpu-bdb"
CONDA_ENV_PATH="/home/$USERNAME/conda/etc/profile.d/conda.sh"

# TODO: Unify interface/IP setting/getting for cluster startup
# and scheduler file
INTERFACE="ib0"

# TODO: Remove hard-coding of scheduler
SCHEDULER=$(hostname)
SCHEDULER_FILE=$GPU_BDB_HOME/gpu_bdb/cluster_configuration/example-cluster-scheduler.json
LOGDIR="/tmp/gpu-bdb-dask-logs/"
WORKER_DIR="/tmp/gpu-bdb-dask-workers/"

# Purge Dask worker and log directories
rm -rf $LOGDIR/*
mkdir -p $LOGDIR
rm -rf $WORKER_DIR/*
mkdir -p $WORKER_DIR

# Purge Dask config directories
rm -rf ~/.config/dask

# Activate conda environment
source $CONDA_ENV_PATH
conda activate $CONDA_ENV_NAME

# Dask/distributed configuration
export DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT="100s"
export DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP="600s"
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN="1s"
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX="60s"

# Setup scheduler (only on the designated scheduler host)
# FIX: $SCHEDULER is quoted so [ ] does not break if it is empty or
# contains whitespace.
if [ "$HOSTNAME" = "$SCHEDULER" ]; then
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
CUDA_VISIBLE_DEVICES='0' DASK_UCX__CUDA_COPY=True DASK_UCX__TCP=True DASK_UCX__NVLINK=True DASK_UCX__INFINIBAND=False DASK_UCX__RDMACM=False nohup dask-scheduler --dashboard-address 8787 --interface $INTERFACE --protocol ucx > $LOGDIR/scheduler.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
CUDA_VISIBLE_DEVICES='0' nohup dask-scheduler --dashboard-address 8787 --interface $INTERFACE --protocol tcp > $LOGDIR/scheduler.log 2>&1 &
fi
fi

# Setup workers
# With BLAZING_ALLOCATOR_MODE=existing, workers get an up-front RMM pool
# (--rmm-pool-size); otherwise they start without one.
if [ "$BLAZING_ALLOCATOR_MODE" = "existing" ]; then
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --rmm-pool-size=$POOL_SIZE --memory-limit=$MAX_SYSTEM_MEMORY --enable-tcp-over-ucx --enable-nvlink --disable-infiniband --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --rmm-pool-size=$POOL_SIZE --memory-limit=$MAX_SYSTEM_MEMORY --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
else
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --memory-limit=$MAX_SYSTEM_MEMORY --enable-tcp-over-ucx --enable-nvlink --disable-infiniband --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $WORKER_DIR --memory-limit=$MAX_SYSTEM_MEMORY --scheduler-file $SCHEDULER_FILE >> $LOGDIR/worker.log 2>&1 &
fi
fi
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/cluster_configuration/cluster-startup-slurm.sh | #!/bin/bash
#########################################################
### Configuration to (possibly) tweak ###
#########################################################
USERNAME=$(whoami)
# Logging and scratch space for your machine or cluster
LOCAL_DIRECTORY=${LOCAL_DIRECTORY:-$HOME/dask-local-directory}
SCHEDULER_FILE=${SCHEDULER_FILE:-$LOCAL_DIRECTORY/scheduler.json}
LOGDIR=${LOGDIR:-$LOCAL_DIRECTORY/logs}
WORKER_DIR=${WORKER_DIR:-/tmp/gpu-bdb-dask-workers/}
# Communication protocol: TCP (default), NVLINK, or IB — selects the
# scheduler/worker launch variant below
CLUSTER_MODE=${CLUSTER_MODE:-TCP}
# Cluster memory configuration
# MAX_SYSTEM_MEMORY = all host RAM reported by `free -m`, in MiB
MAX_SYSTEM_MEMORY=$(free -m | awk '/^Mem:/{print $2}')M
DEVICE_MEMORY_LIMIT=${DEVICE_MEMORY_LIMIT:-70GB}
POOL_SIZE=${POOL_SIZE:-78GB}
# Conda environment information
CONDA_ENV_NAME=${CONDA_ENV_NAME:-rapids-gpu-bdb}
CONDA_ENV_PATH=${CONDA_ENV_PATH:-/opt/conda/etc/profile.d/conda.sh}
# Repository home
GPU_BDB_HOME=${GPU_BDB_HOME:-$HOME/gpu-bdb}
# Dask-cuda optional configuration
export DASK_JIT_UNSPILL=${DASK_JIT_UNSPILL:-True}
export DASK_EXPLICIT_COMMS=${DASK_EXPLICIT_COMMS:-False}
#########################################################
### Configuration to (generally) leave as default      ###
#########################################################
# First positional argument: "SCHEDULER" on the node that should also run
# dask-scheduler; anything else makes this a worker-only node.
ROLE=$1
# NOTE(review): self-assignment is a no-op when HOSTNAME is already set;
# presumably kept to make the dependency on $HOSTNAME explicit — confirm.
HOSTNAME=$HOSTNAME
# Dask/distributed configuration: generous timeouts/backoff for slow startup
export DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT=${DASK_DISTRIBUTED__COMM__TIMEOUTS__CONNECT:-100s}
export DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP=${DASK_DISTRIBUTED__COMM__TIMEOUTS__TCP:-600s}
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN=${DASK_DISTRIBUTED__COMM__RETRY__DELAY__MIN:-1s}
export DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX=${DASK_DISTRIBUTED__COMM__RETRY__DELAY__MAX:-60s}
# Purge Dask worker and log directories
# (only the scheduler node cleans the shared local directory)
if [ "$ROLE" = "SCHEDULER" ]; then
    rm -rf $LOCAL_DIRECTORY/*
    mkdir -p $LOCAL_DIRECTORY
    rm -rf $LOGDIR/*
    mkdir -p $LOGDIR
    rm -rf $WORKER_DIR/*
    mkdir -p $WORKER_DIR
fi
# Purge Dask config directories
rm -rf ~/.config/dask
#########################################################
### Launch the cluster ###
#########################################################
# Activate conda environment and install the local library
source $CONDA_ENV_PATH
conda activate $CONDA_ENV_NAME
cd $GPU_BDB_HOME/gpu_bdb
python -m pip install .
# Setup scheduler: started only on the SCHEDULER node, pinned to GPU 0,
# backgrounded via nohup, logging to $LOGDIR/$HOSTNAME-scheduler.log.
if [ "$ROLE" = "SCHEDULER" ]; then
    if [ "$CLUSTER_MODE" = "NVLINK" ]; then
        CUDA_VISIBLE_DEVICES='0' DASK_UCX__CUDA_COPY=True DASK_UCX__TCP=True DASK_UCX__NVLINK=True DASK_UCX__INFINIBAND=False DASK_UCX__RDMACM=False nohup dask-scheduler --dashboard-address 8787 --protocol ucx --scheduler-file $SCHEDULER_FILE > $LOGDIR/$HOSTNAME-scheduler.log 2>&1 &
    fi
    # NOTE(review): IB mode hard-codes the ibp18s0 interface here and in the
    # worker launch below — adjust for your fabric.
    if [ "$CLUSTER_MODE" = "IB" ]; then
        DASK_RMM__POOL_SIZE=1GB CUDA_VISIBLE_DEVICES='0' DASK_DISTRIBUTED__COMM__UCX__CREATE_CUDA_CONTEXT=True nohup dask-scheduler --dashboard-address 8787 --protocol ucx --interface ibp18s0 --scheduler-file $SCHEDULER_FILE > $LOGDIR/$HOSTNAME-scheduler.log 2>&1 &
    fi
    if [ "$CLUSTER_MODE" = "TCP" ]; then
        CUDA_VISIBLE_DEVICES='0' nohup dask-scheduler --dashboard-address 8787 --protocol tcp --scheduler-file $SCHEDULER_FILE > $LOGDIR/$HOSTNAME-scheduler.log 2>&1 &
    fi
fi
# Setup workers: launched on every node (including the scheduler node),
# discovering the scheduler through $SCHEDULER_FILE.
if [ "$CLUSTER_MODE" = "NVLINK" ]; then
    dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $LOCAL_DIRECTORY --rmm-pool-size $POOL_SIZE --memory-limit $MAX_SYSTEM_MEMORY --enable-tcp-over-ucx --enable-nvlink --disable-infiniband --scheduler-file $SCHEDULER_FILE >> $LOGDIR/$HOSTNAME-worker.log 2>&1 &
fi
if [ "$CLUSTER_MODE" = "IB" ]; then
    python -m dask_cuda.cli.dask_cuda_worker --rmm-pool-size $POOL_SIZE --scheduler-file $SCHEDULER_FILE --local-directory $LOCAL_DIRECTORY --interface ibp18s0 --enable-tcp-over-ucx --device-memory-limit $DEVICE_MEMORY_LIMIT 2>&1 | tee $LOGDIR/$HOSTNAME-worker.log &
fi
if [ "$CLUSTER_MODE" = "TCP" ]; then
    dask-cuda-worker --device-memory-limit $DEVICE_MEMORY_LIMIT --local-directory $LOCAL_DIRECTORY --rmm-pool-size $POOL_SIZE --memory-limit $MAX_SYSTEM_MEMORY --scheduler-file $SCHEDULER_FILE >> $LOGDIR/$HOSTNAME-worker.log 2>&1 &
fi
| 0 |
rapidsai_public_repos/gpu-bdb/gpu_bdb | rapidsai_public_repos/gpu-bdb/gpu_bdb/benchmark_runner/wait.py | import os
import sys
import time
import yaml
from dask.distributed import Client

# Benchmark-runner helper: block until the Dask cluster reports the
# expected number of workers. Configuration comes from a YAML file given
# as argv[1], with environment-variable fallbacks.
with open(sys.argv[1]) as fh:
    conf = yaml.safe_load(fh)

# Worker count to wait for; defaults to 16 when NUM_WORKERS is unset.
expected_workers = int(os.environ.get("NUM_WORKERS", 16))

# Scheduler file path: prefer the YAML value, fall back to $SCHEDULER_FILE.
scheduler_file_path = conf.get("scheduler_file_path")
if scheduler_file_path is None:
    scheduler_file_path = os.environ.get("SCHEDULER_FILE")

while True:
    # Reconnect on every poll; the context manager closes the client each time.
    with Client(scheduler_file=scheduler_file_path) as client:
        n_workers = len(client.scheduler_info()['workers'])
    if n_workers >= expected_workers:
        print(f'Got all {n_workers} workers')
        break
    print(f'Expected {expected_workers} but got {n_workers}, waiting..')
    time.sleep(10)
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.