repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/components/test_connectivity_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run garbage collection before each test to release leftover objects."""
    gc.collect()
# Directed graph is not currently supported
IS_DIRECTED = [False, True]


# @pytest.mark.skipif(
#     is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_wcc(dask_client, directed):
    """Compare multi-GPU weakly_connected_components against the SG result.

    For an undirected graph, all vertices sharing a single-GPU component
    label must share one dask component label.  For a directed graph, WCC
    is expected to raise ValueError.
    """
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "netscience.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)

    # Distributed (dask) edge list
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    # Single-GPU edge list used to build the reference graph
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst", renumber=True)

    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst")

    if not directed:
        expected_dist = cugraph.weakly_connected_components(g)
        result_dist = dcg.weakly_connected_components(dg)
        result_dist = result_dist.compute()

        # Join on vertex so each row carries both the SG and MG labels
        compare_dist = expected_dist.merge(
            result_dist, on="vertex", suffixes=["_local", "_dask"]
        )

        # Label values need not be equal between SG and MG; only the grouping
        # must match: every vertex with a given local label must carry the
        # same dask label.
        unique_local_labels = compare_dist["labels_local"].unique()
        for label in unique_local_labels.values.tolist():
            dask_labels_df = compare_dist[compare_dist["labels_local"] == label]
            dask_labels = dask_labels_df["labels_dask"]
            assert (dask_labels.iloc[0] == dask_labels).all()
    else:
        # Directed graphs are rejected by WCC
        with pytest.raises(ValueError):
            cugraph.weakly_connected_components(g)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/core/test_core_number_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run garbage collection before each test to release leftover objects."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED
degree_type = ["incoming", "outgoing", "bidirectional"]

fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (degree_type, "degree_type"),
)


@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """Yield one (graph_file, degree_type) parameter combination as a dict."""
    param_names = ("graph_file", "degree_type")
    return dict(zip(param_names, request.param))
@pytest.fixture(scope="module")
def input_expected_output(dask_client, input_combo):
"""
This fixture returns the inputs and expected results from the Core number
algo.
"""
degree_type = input_combo["degree_type"]
input_data_path = input_combo["graph_file"]
G = utils.generate_cugraph_graph_from_file(
input_data_path, directed=False, edgevals=True
)
input_combo["SGGraph"] = G
sg_core_number_results = cugraph.core_number(G, degree_type)
sg_core_number_results = sg_core_number_results.sort_values("vertex").reset_index(
drop=True
)
input_combo["sg_core_number_results"] = sg_core_number_results
input_combo["degree_type"] = degree_type
# Creating an edgelist from a dask cudf dataframe
chunksize = dcg.get_chunksize(input_data_path)
ddf = dask_cudf.read_csv(
input_data_path,
chunksize=chunksize,
delimiter=" ",
names=["src", "dst", "value"],
dtype=["int32", "int32", "float32"],
)
dg = cugraph.Graph(directed=False)
dg.from_dask_cudf_edgelist(
ddf, source="src", destination="dst", edge_attr="value", renumber=True
)
input_combo["MGGraph"] = dg
return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
def test_sg_core_number(dask_client, benchmark, input_expected_output):
    """Benchmark-only: time single-GPU core_number on the fixture graph."""
    graph = input_expected_output["SGGraph"]
    degree_type_arg = input_expected_output["degree_type"]
    results = benchmark(cugraph.core_number, graph, degree_type_arg)
    assert results is not None
@pytest.mark.mg
def test_core_number(dask_client, benchmark, input_expected_output):
    """MG core_number must agree with the SG reference on every vertex."""
    dg = input_expected_output["MGGraph"]
    degree_type = input_expected_output["degree_type"]

    result_core_number = benchmark(dcg.core_number, dg, degree_type)
    # Normalize the distributed result: drop duplicate rows across partitions,
    # materialize, and sort so rows line up with the SG reference.
    result_core_number = (
        result_core_number.drop_duplicates()
        .compute()
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"core_number": "mg_core_number"})
    )

    expected_output = input_expected_output["sg_core_number_results"]

    # Update the mg core number with sg core number results
    # for easy comparison using cuDF DataFrame methods.
    result_core_number["sg_core_number"] = expected_output["core_number"]
    counts_diffs = result_core_number.query("mg_core_number != sg_core_number")

    assert len(counts_diffs) == 0
@pytest.mark.mg
def test_core_number_invalid_input(input_expected_output):
    """core_number must reject a non-string degree_type.

    The original version of this test also read karate-asymmetric.csv and
    built a directed MG graph, but that graph was dead code: the local ``dg``
    was rebound to the fixture's graph before any use.  The wasted I/O and
    graph construction are removed; the asserted behavior is unchanged.
    """
    invalid_degree_type = 3
    dg = input_expected_output["MGGraph"]

    with pytest.raises(ValueError):
        dcg.core_number(dg, invalid_degree_type)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/core/test_k_core_mg.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from cudf.testing.testing import assert_frame_equal
from cugraph.structure.symmetrize import symmetrize_df
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run garbage collection before each test to release leftover objects."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED
core_number = [True, False]
degree_type = ["bidirectional", "outgoing", "incoming"]

fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"), (core_number, "core_number"), (degree_type, "degree_type")
)


@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """Yield one (graph_file, core_number, degree_type) combination as a dict."""
    param_names = ("graph_file", "core_number", "degree_type")
    return dict(zip(param_names, request.param))
@pytest.fixture(scope="module")
def input_expected_output(dask_client, input_combo):
"""
This fixture returns the inputs and expected results from the Core number
algo.
"""
core_number = input_combo["core_number"]
degree_type = input_combo["degree_type"]
input_data_path = input_combo["graph_file"]
G = utils.generate_cugraph_graph_from_file(
input_data_path, directed=False, edgevals=True
)
if core_number:
# compute the core_number
core_number = cugraph.core_number(G, degree_type=degree_type)
else:
core_number = None
input_combo["core_number"] = core_number
input_combo["SGGraph"] = G
sg_k_core_graph = cugraph.k_core(
G, core_number=core_number, degree_type=degree_type
)
sg_k_core_results = sg_k_core_graph.view_edge_list()
# FIXME: The result will come asymetric. Symmetrize the results
srcCol = sg_k_core_graph.source_columns
dstCol = sg_k_core_graph.destination_columns
wgtCol = sg_k_core_graph.weight_column
sg_k_core_results = (
symmetrize_df(sg_k_core_results, srcCol, dstCol, wgtCol)
.sort_values([srcCol, dstCol])
.reset_index(drop=True)
)
input_combo["sg_k_core_results"] = sg_k_core_results
# Creating an edgelist from a dask cudf dataframe
chunksize = dcg.get_chunksize(input_data_path)
ddf = dask_cudf.read_csv(
input_data_path,
chunksize=chunksize,
delimiter=" ",
names=["src", "dst", "value"],
dtype=["int32", "int32", "float32"],
)
dg = cugraph.Graph(directed=False)
# FIXME: False when renumbering (C++ and python renumbering)
dg.from_dask_cudf_edgelist(
ddf,
source="src",
destination="dst",
edge_attr="value",
renumber=True,
)
input_combo["MGGraph"] = dg
return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
def test_sg_k_core(dask_client, benchmark, input_expected_output):
    """Benchmark-only: time single-GPU k_core on the fixture graph."""
    G = input_expected_output["SGGraph"]
    sg_k_core = benchmark(
        cugraph.k_core,
        G,
        core_number=input_expected_output["core_number"],
        degree_type=input_expected_output["degree_type"],
    )
    assert sg_k_core is not None
@pytest.mark.mg
def test_dask_mg_k_core(dask_client, benchmark, input_expected_output):
    """MG k_core edge list must equal the symmetrized SG k_core edge list."""
    dg = input_expected_output["MGGraph"]
    core_number = input_expected_output["core_number"]

    k_core_results = benchmark(dcg.k_core, dg, core_number=core_number)

    expected_k_core_results = input_expected_output["sg_k_core_results"]

    # Materialize and normalize the distributed result so it is row-for-row
    # comparable with the SG reference; the MG column is named "weights"
    # while the SG reference uses "weight".
    k_core_results = (
        k_core_results.compute()
        .sort_values(["src", "dst"])
        .reset_index(drop=True)
        .rename(columns={"weights": "weight"})
    )

    assert_frame_equal(
        expected_k_core_results, k_core_results, check_dtype=False, check_like=True
    )
@pytest.mark.mg
def test_dask_mg_k_core_invalid_input(dask_client):
    """k_core must reject directed graphs and unknown degree_type values."""
    input_data_path = datasets[0]
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    # Directed graphs are not supported by k_core
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )
    with pytest.raises(ValueError):
        dcg.k_core(dg)

    # Undirected graph, but an invalid degree_type string
    dg = cugraph.Graph(directed=False)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        store_transposed=True,
    )

    degree_type = "invalid"

    with pytest.raises(ValueError):
        dcg.k_core(dg, degree_type=degree_type)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/core/test_core_number.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run garbage collection before each test to release leftover objects."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
degree_type = ["incoming", "outgoing"]
fixture_params = gen_fixture_params_product(
(UNDIRECTED_DATASETS, "graph_file"),
(degree_type, "degree_type"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
"""
This fixture returns a dictionary containing all input params required to
run a Core number algo
"""
parameters = dict(zip(("graph_file", "degree_type"), request.param))
graph_file = parameters["graph_file"]
G = graph_file.get_graph()
input_data_path = graph_file.get_path()
Gnx = utils.generate_nx_graph_from_file(
input_data_path, directed=False, edgevals=True
)
parameters["G"] = G
parameters["Gnx"] = Gnx
return parameters
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
def test_core_number(input_combo):
    """cugraph core_number must match networkx core_number on every vertex."""
    G = input_combo["G"]
    Gnx = input_combo["Gnx"]
    degree_type = input_combo["degree_type"]

    # Reference: networkx returns {vertex: core_number}; convert to a cuDF
    # frame sorted by vertex so rows line up with the cugraph result.
    nx_core_number_results = cudf.DataFrame()
    dic_results = nx.core_number(Gnx)
    nx_core_number_results["vertex"] = dic_results.keys()
    nx_core_number_results["core_number"] = dic_results.values()
    nx_core_number_results = nx_core_number_results.sort_values("vertex").reset_index(
        drop=True
    )

    core_number_results = (
        cugraph.core_number(G, degree_type)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"core_number": "cugraph_core_number"})
    )

    # Compare the nx core number results with cugraph
    core_number_results["nx_core_number"] = nx_core_number_results["core_number"]
    counts_diff = core_number_results.query("nx_core_number != cugraph_core_number")
    assert len(counts_diff) == 0
@pytest.mark.sg
def test_core_number_invalid_input(input_combo):
    """core_number must reject directed graphs and bad degree_type values."""
    dataset_path = (
        utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate-asymmetric.csv"
    ).as_posix()
    nx_df = utils.read_csv_for_nx(dataset_path)

    edges = cudf.DataFrame()
    edges["src"] = cudf.Series(nx_df["0"])
    edges["dst"] = cudf.Series(nx_df["1"])
    edges["weights"] = cudf.Series(nx_df["weight"])

    # Directed graphs are rejected
    directed_G = cugraph.Graph(directed=True)
    directed_G.from_cudf_edgelist(
        edges, source="src", destination="dst", edge_attr="weights"
    )
    with pytest.raises(ValueError):
        cugraph.core_number(directed_G)

    # Unknown degree_type strings are rejected
    G = input_combo["G"]
    with pytest.raises(ValueError):
        cugraph.core_number(G, "invalid")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/core/test_k_core.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
print("Networkx version : {} ".format(nx.__version__))
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
gc.collect()
def calc_k_cores(graph_file, directed=True):
    """Return (cugraph k-core, networkx k-core) built from *graph_file*.

    ``directed`` selects Graph vs DiGraph on both sides so the returned
    cugraph result can be compared to an nx graph of the same type.
    """
    dataset_path = graph_file.get_path()
    NM = utils.read_csv_for_nx(dataset_path)
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=directed), ignore_weights=True
    )
    nx_graph_type = nx.DiGraph() if directed else nx.Graph()
    Gnx = nx.from_pandas_edgelist(
        NM, source="0", target="1", create_using=nx_graph_type
    )
    return cugraph.k_core(G), nx.k_core(Gnx)
def compare_edges(cg, nxg):
    """Assert the cugraph k-core has exactly the nx k-core's edges; return True."""
    edge_df = cg.view_edge_list()
    srcs, dsts = edge_df["src"], edge_df["dst"]
    # The k-core was computed on an unweighted graph
    assert cg.edgelist.weights is False
    assert len(srcs) == nxg.size()
    for idx in range(len(srcs)):
        assert nxg.has_edge(srcs[idx], dsts[idx])
    return True
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_k_core_Graph(graph_file):
    """The cugraph k-core must match the networkx k-core edge-for-edge."""
    cu_kcore, nx_kcore = calc_k_cores(graph_file, directed=False)
    assert compare_edges(cu_kcore, nx_kcore)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_k_core_Graph_nx(graph_file):
    """cugraph.k_core on a networkx input is isomorphic to nx.k_core."""
    NM = utils.read_csv_for_nx(graph_file.get_path())
    Gnx = nx.from_pandas_edgelist(
        NM, source="0", target="1", create_using=nx.Graph()
    )
    expected = nx.k_core(Gnx)
    actual = cugraph.k_core(Gnx)
    assert nx.is_isomorphic(expected, actual)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_k_core_corenumber_multicolumn(graph_file):
    """k_core on multi-column vertices must match the single-column result."""
    dataset_path = graph_file.get_path()
    cu_M = utils.read_csv_file(dataset_path)
    cu_M.rename(columns={"0": "src_0", "1": "dst_0"}, inplace=True)
    # Second vertex column is an offset copy of the first
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000

    G1 = cugraph.Graph()
    G1.from_cudf_edgelist(
        cu_M, source=["src_0", "src_1"], destination=["dst_0", "dst_1"]
    )

    corenumber_G1 = cugraph.core_number(G1)
    corenumber_G1.rename(columns={"core_number": "values"}, inplace=True)
    corenumber_G1 = corenumber_G1[["0_vertex", "1_vertex", "values"]]
    # NOTE(review): the computed multi-column core numbers are discarded
    # here, so k_core receives core_number=None and recomputes internally.
    # Presumably deliberate — confirm; otherwise the three lines above are
    # dead code.
    corenumber_G1 = None
    ck_res = cugraph.k_core(G1, core_number=corenumber_G1)

    # Single-column reference over the same edges
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(cu_M, source="src_0", destination="dst_0", renumber=False)
    corenumber_G2 = cugraph.core_number(G2)
    corenumber_G2.rename(columns={"core_number": "values"}, inplace=True)
    corenumber_G2 = corenumber_G2[["vertex", "values"]]
    ck_exp = cugraph.k_core(G2, core_number=corenumber_G2)

    # FIXME: Replace with multi-column view_edge_list()
    edgelist_df = ck_res.edgelist.edgelist_df
    edgelist_df_res = ck_res.unrenumber(edgelist_df, "src")
    edgelist_df_res = ck_res.unrenumber(edgelist_df_res, "dst")
    for i in range(len(edgelist_df_res)):
        assert ck_exp.has_edge(
            edgelist_df_res["0_src"].iloc[i], edgelist_df_res["0_dst"].iloc[i]
        )
@pytest.mark.sg
def test_k_core_invalid_input():
    """k_core rejects directed graphs and unknown degree_type values."""
    karate = UNDIRECTED_DATASETS[0]

    directed_G = karate.get_graph(create_using=cugraph.Graph(directed=True))
    with pytest.raises(ValueError):
        cugraph.k_core(directed_G)

    G = karate.get_graph()
    with pytest.raises(ValueError):
        cugraph.k_core(G, degree_type="invalid")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_subgraph_extraction.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import numpy as np
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
from cugraph.datasets import karate
###############################################################################
# pytest setup - called for each test function
def setup_function():
    """Run garbage collection before each test to release leftover objects."""
    gc.collect()
###############################################################################
def compare_edges(cg, nxg):
    """Assert cg and nxg contain exactly the same edges; return True."""
    edge_df = cg.view_edge_list()
    assert len(edge_df) == nxg.size()
    for row in range(len(edge_df)):
        assert nxg.has_edge(edge_df["src"].iloc[row], edge_df["dst"].iloc[row])
    return True
def cugraph_call(M, verts, directed=True):
    """Extract the subgraph of M induced by *verts* with cugraph."""
    edgelist = cudf.from_pandas(M)
    G = cugraph.Graph(directed=directed)
    # FIXME: Add the column name in a list to trigger the python renumbering.
    # Drop this requirement when 'subgraph_extraction' leverages the CAPI
    # graph which calls renumbering.
    G.from_cudf_edgelist(edgelist, source="0", destination="1", edge_attr="weight")
    return cugraph.subgraph(G, cudf.Series(verts))
def nx_call(M, verts, directed=True):
    """Extract the subgraph of M induced by *verts* with networkx."""
    nx_graph_type = nx.DiGraph() if directed else nx.Graph()
    G = nx.from_pandas_edgelist(
        M, source="0", target="1", create_using=nx_graph_type
    )
    return nx.subgraph(G, verts)
###############################################################################
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_subgraph_extraction_DiGraph(graph_file):
    """Directed subgraph extraction must match networkx."""
    M = utils.read_csv_for_nx(graph_file.get_path())
    verts = np.array([0, 1, 17], dtype=np.int32)
    assert compare_edges(cugraph_call(M, verts, True), nx_call(M, verts, True))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_subgraph_extraction_Graph(graph_file):
    """Undirected subgraph extraction must match networkx."""
    M = utils.read_csv_for_nx(graph_file.get_path())
    verts = np.array([0, 1, 17], dtype=np.int32)
    assert compare_edges(cugraph_call(M, verts, False), nx_call(M, verts, False))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [DEFAULT_DATASETS[2]])
def test_subgraph_extraction_Graph_nx(graph_file):
    """cugraph.subgraph accepts a networkx Graph directly."""
    directed = False
    verts = np.array([0, 1, 17], dtype=np.int32)
    M = utils.read_csv_for_nx(graph_file.get_path())

    nx_graph_type = nx.DiGraph() if directed else nx.Graph()
    G = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx_graph_type
    )

    nx_sub = nx.subgraph(G, verts)
    cu_sub = cugraph.subgraph(G, cudf.Series(verts))

    # Every edge cugraph extracted must also be in the networkx subgraph
    for u, v in cu_sub.edges():
        assert nx_sub.has_edge(u, v)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_subgraph_extraction_multi_column(graph_file):
    """Multi-column-vertex subgraph must contain the single-column edges."""
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)

    cu_M = cudf.DataFrame()
    cu_M["src_0"] = cudf.Series(M["0"])
    cu_M["dst_0"] = cudf.Series(M["1"])
    cu_M["weight"] = cudf.Series(M["weight"])
    # Second vertex column is an offset copy of the first
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000

    G1 = cugraph.Graph()
    G1.from_cudf_edgelist(
        cu_M,
        source=["src_0", "src_1"],
        destination=["dst_0", "dst_1"],
        edge_attr="weight",
    )

    verts = cudf.Series([0, 1, 17])
    verts_G1 = cudf.DataFrame()
    verts_G1["v_0"] = verts
    verts_G1["v_1"] = verts + 1000

    sG1 = cugraph.subgraph(G1, verts_G1)

    # Single-column reference graph over the same edges
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(cu_M, source="src_0", destination="dst_0", edge_attr="weight")
    sG2 = cugraph.subgraph(G2, verts)

    # FIXME: Replace with multi-column view_edge_list()
    edgelist_df = sG1.edgelist.edgelist_df
    edgelist_df_res = sG1.unrenumber(edgelist_df, "src")
    edgelist_df_res = sG1.unrenumber(edgelist_df_res, "dst")
    for i in range(len(edgelist_df_res)):
        assert sG2.has_edge(
            edgelist_df_res["0_src"].iloc[i], edgelist_df_res["0_dst"].iloc[i]
        )
# FIXME: the coverage provided by this test could probably be handled by
# another test that also checks using renumber=False
# FIXME: Drop this test as 'subgraph_extraction' requires renumbering
@pytest.mark.sg
@pytest.mark.skip("obsolete")
def test_subgraph_extraction_graph_not_renumbered():
    """
    Ensure subgraph() works with a Graph that has not been renumbered
    """
    gdf = karate.get_edgelist()
    verts = np.array([0, 1, 2], dtype=np.int32)
    sverts = cudf.Series(verts)
    G = cugraph.Graph()

    G.from_cudf_edgelist(
        gdf, source="src", destination="dst", edge_attr="wgt", renumber=False
    )
    Sg = cugraph.subgraph(G, sverts)

    # Induced subgraph on karate vertices {0, 1, 2} is expected to have
    # 3 vertices and 3 edges
    assert Sg.number_of_vertices() == 3
    assert Sg.number_of_edges() == 3
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_louvain.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import time
import pytest
import networkx as nx
import cugraph
import cupyx
import cudf
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cugraph.datasets import karate_asymmetric
try:
import community
except ModuleNotFoundError:
pytest.exit(
"community module not found\n"
"The python-louvain module needs to be installed\n"
"please run `pip install python-louvain`"
)
print("Networkx version : {} ".format(nx.__version__))
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
gc.collect()
def cugraph_call(graph_file, edgevals=False, directed=False):
    """Run cugraph Louvain on *graph_file*; return (partition df, modularity)."""
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=directed), ignore_weights=not edgevals
    )

    # cugraph Louvain Call
    start = time.time()
    parts, mod = cugraph.louvain(G)
    elapsed = time.time() - start
    print("Cugraph Time : " + str(elapsed))

    return parts, mod
def networkx_call(M):
    """Run python-louvain best_partition on a pandas edge list frame."""
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.Graph()
    )

    # Networkx louvain Call
    print("Solving... ")
    start = time.time()
    partition = community.best_partition(Gnx)
    elapsed = time.time() - start
    print("Networkx Time : " + str(elapsed))

    return partition
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_louvain(graph_file):
    """Compare cugraph Louvain against python-louvain on modularity quality."""
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    cu_parts, cu_mod = cugraph_call(graph_file, edgevals=True)
    nx_parts = networkx_call(M)

    # Calculating modularity scores for comparison
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.Graph()
    )

    cu_parts = cu_parts.to_pandas()
    cu_map = dict(zip(cu_parts["vertex"], cu_parts["partition"]))

    assert set(nx_parts.keys()) == set(cu_map.keys())

    cu_mod_nx = community.modularity(cu_map, Gnx)
    nx_mod = community.modularity(nx_parts, Gnx)

    assert len(cu_parts) == len(nx_parts)
    # Partitions may legitimately differ: cugraph's modularity only needs to
    # reach 82% of python-louvain's, and cugraph's reported score must agree
    # with the score python-louvain recomputes for the same partition.
    assert cu_mod > (0.82 * nx_mod)
    assert abs(cu_mod - cu_mod_nx) < 0.0001
@pytest.mark.sg
def test_louvain_directed_graph():
    """Louvain must reject directed graphs with ValueError."""
    with pytest.raises(ValueError):
        cugraph_call(karate_asymmetric, directed=True, edgevals=True)
@pytest.mark.sg
@pytest.mark.parametrize("is_weighted", [True, False])
def test_louvain_csr_graph(is_weighted):
    """Louvain partitions must match between CSR- and COO-built graphs."""
    karate = UNDIRECTED_DATASETS[0]
    df = karate.get_edgelist()

    # Convert the COO edge list to CSR for the adjacency-list constructor
    M = cupyx.scipy.sparse.coo_matrix(
        (df["wgt"].to_cupy(), (df["src"].to_cupy(), df["dst"].to_cupy()))
    )
    M = M.tocsr()

    offsets = cudf.Series(M.indptr)
    indices = cudf.Series(M.indices)
    weights = cudf.Series(M.data)
    G_csr = cugraph.Graph()
    G_coo = karate.get_graph()

    if not is_weighted:
        weights = None

    G_csr.from_cudf_adjlist(offsets, indices, weights)

    assert G_csr.is_weighted() is is_weighted

    louvain_csr, mod_csr = cugraph.louvain(G_csr)
    louvain_coo, mod_coo = cugraph.louvain(G_coo)

    # Align the two results by vertex for a row-wise comparison
    louvain_csr = louvain_csr.sort_values("vertex").reset_index(drop=True)
    result_louvain = (
        louvain_coo.sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"partition": "partition_coo"})
    )
    result_louvain["partition_csr"] = louvain_csr["partition"]

    parition_diffs = result_louvain.query("partition_csr != partition_coo")

    assert len(parition_diffs) == 0
    assert mod_csr == mod_coo
@pytest.mark.sg
def test_louvain_nx_graph_with_isolated_nodes():
    """Louvain on a networkx graph must assign every node, isolated or not."""
    # Cluster IDs are expected to be unique if all nodes are isolated
    G = nx.Graph()
    G.add_nodes_from(range(5))
    result, _ = cugraph.louvain(G)
    assert set(result.keys()) == set(G.nodes)
    assert len(set(result.values())) == G.number_of_nodes()

    # A graph with 5 nodes, where 3 of the nodes are isolated: the two
    # connected nodes share one cluster, so one fewer unique cluster ID
    G.add_edge(1, 2)
    result, _ = cugraph.louvain(G)
    assert set(result.keys()) == set(G.nodes)
    assert len(set(result.values())) == G.number_of_nodes() - 1
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_modularity.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
from cugraph.utilities import ensure_cugraph_obj_for_nx
def cugraph_call(G, partitions):
    """Run spectral modularity-maximization clustering on G and return the
    modularity score of the resulting vertex-to-cluster assignment."""
    clusters = cugraph.spectralModularityMaximizationClustering(
        G, partitions, num_eigen_vects=(partitions - 1)
    )
    return cugraph.analyzeClustering_modularity(
        G, partitions, clusters, "vertex", "cluster"
    )
def random_call(G, partitions):
    """Assign each vertex of G to a uniformly random partition and return the
    modularity score of that assignment.

    Serves as a baseline: any real clustering of the same graph should score
    strictly higher modularity.
    """
    random.seed(0)  # deterministic baseline across test runs
    num_verts = G.number_of_vertices()
    # Build the random assignment in one pass (comprehension) instead of a
    # Python-level append loop.
    assignment = [random.randint(0, partitions - 1) for _ in range(num_verts)]
    assignment_cu = cudf.DataFrame(assignment, columns=["cluster"])
    assignment_cu["vertex"] = assignment_cu.index
    assignment_cu = assignment_cu.astype("int32")
    score = cugraph.analyzeClustering_modularity(
        G, partitions, assignment_cu, "vertex", "cluster"
    )
    return score
PARTITIONS = [2, 4, 8]
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_modularity_clustering(graph_file, partitions):
    """Spectral modularity clustering must score higher than a random
    vertex-to-partition assignment on the same graph."""
    gc.collect()
    # Read in the graph and get a cugraph object
    G = graph_file.get_graph()
    # read_weights_in_sp=False => value column dtype is float64
    G.edgelist.edgelist_df["weights"] = G.edgelist.edgelist_df["weights"].astype(
        "float64"
    )
    # Get the modularity score for partitioning versus random assignment
    cu_score = cugraph_call(G, partitions)
    rand_score = random_call(G, partitions)
    # Assert that the partitioning has better modularity than the random
    # assignment
    assert cu_score > rand_score
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_modularity_clustering_nx(graph_file, partitions):
    """Spectral clustering on a graph converted from NetworkX must beat a
    random assignment of the same vertices."""
    dataset_path = graph_file.get_path()
    edge_df = utils.read_csv_for_nx(dataset_path, read_weights_in_sp=True)
    nx_graph = nx.from_pandas_edgelist(
        edge_df,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.Graph(),
    )
    assert nx.is_directed(nx_graph) is False
    assert nx.is_weighted(nx_graph) is True
    # Convert to a cugraph object and sanity-check the conversion.
    cu_graph, isNx = ensure_cugraph_obj_for_nx(nx_graph)
    assert cugraph.is_directed(cu_graph) is False
    assert cugraph.is_weighted(cu_graph) is True
    # Real clustering must beat the random baseline.
    cu_score = cugraph_call(cu_graph, partitions)
    rand_score = random_call(cu_graph, partitions)
    assert cu_score > rand_score
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_modularity_clustering_multi_column(graph_file, partitions):
    """Spectral clustering on a graph with multi-column vertex ids must beat
    a random assignment computed on the single-column version of the graph."""
    # Read in the graph and get a cugraph object
    dataset_path = graph_file.get_path()
    cu_M = utils.read_csv_file(dataset_path, read_weights_in_sp=False)
    cu_M.rename(columns={"0": "src_0", "1": "dst_0"}, inplace=True)
    # Synthesize a second id column per endpoint by offsetting the first.
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000
    G1 = cugraph.Graph()
    G1.from_cudf_edgelist(
        cu_M, source=["src_0", "src_1"], destination=["dst_0", "dst_1"], edge_attr="2"
    )
    df1 = cugraph.spectralModularityMaximizationClustering(
        G1, partitions, num_eigen_vects=(partitions - 1)
    )
    # Multi-column graphs expose renumbered "<n>_vertex" columns.
    cu_score = cugraph.analyzeClustering_modularity(
        G1, partitions, df1, ["0_vertex", "1_vertex"], "cluster"
    )
    # Random baseline on the equivalent single-column graph.
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(cu_M, source="src_0", destination="dst_0", edge_attr="2")
    rand_score = random_call(G2, partitions)
    # Assert that the partitioning has better modularity than the random
    # assignment
    assert cu_score > rand_score
# Test to ensure DiGraph objs are not accepted
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.sg
def test_digraph_rejected():
    """Spectral clustering must raise ValueError for a directed graph."""
    df = cudf.DataFrame(
        {
            "src": cudf.Series(range(10)),
            "dst": cudf.Series(range(10)),
            "val": cudf.Series(range(10)),
        }
    )
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        df, source="src", destination="dst", edge_attr="val", renumber=False
    )
    with pytest.raises(ValueError):
        cugraph_call(G, 2)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_triangle_count_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cudf
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force a garbage-collection pass before each test so objects left over
    from earlier tests are released."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# Undirected datasets crossed with whether a start_list is supplied
datasets = utils.DATASETS_UNDIRECTED
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([True, False], "start_list"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    # NOTE(review): request.param carries only (graph_file, start_list), so
    # zip() truncates and the "edgevals" key is never actually populated —
    # confirm whether edgevals was meant to be part of fixture_params.
    parameters = dict(zip(("graph_file", "start_list", "edgevals"), request.param))
    return parameters
@pytest.fixture(scope="module")
def input_expected_output(dask_client, input_combo):
    """
    This fixture returns the inputs and expected results from the triangle
    count algo.

    Builds the SG graph, computes the SG triangle count (the expected
    output), then builds the equivalent MG graph from the same CSV.  Results
    are cached on the input_combo dict so sibling tests reuse them.
    """
    start_list = input_combo["start_list"]
    input_data_path = input_combo["graph_file"]
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=False, edgevals=True
    )
    input_combo["SGGraph"] = G
    if start_list:
        # sample k nodes from the cuGraph graph
        k = random.randint(1, 10)
        srcs = G.view_edge_list()[G.source_columns]
        dsts = G.view_edge_list()[G.destination_columns]
        nodes = cudf.concat([srcs, dsts]).drop_duplicates()
        start_list = nodes.sample(k)
    else:
        start_list = None
    # SG result, sorted by vertex, is the golden output for the MG test.
    sg_triangle_results = cugraph.triangle_count(G, start_list)
    sg_triangle_results = sg_triangle_results.sort_values("vertex").reset_index(
        drop=True
    )
    input_combo["sg_triangle_results"] = sg_triangle_results
    input_combo["start_list"] = start_list
    # Creating an edgelist from a dask cudf dataframe
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=False)
    dg.from_dask_cudf_edgelist(
        ddf, source="src", destination="dst", edge_attr="value", renumber=True
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
def test_sg_triangles(dask_client, benchmark, input_expected_output):
    """Benchmark-only run of the single-GPU triangle count."""
    G = input_expected_output["SGGraph"]
    start_list = input_expected_output["start_list"]
    counts = benchmark(cugraph.triangle_count, G, start_list)
    assert counts is not None
@pytest.mark.mg
def test_triangles(dask_client, benchmark, input_expected_output):
    """MG triangle count must match the SG result computed by the fixture."""
    dg = input_expected_output["MGGraph"]
    start_list = input_expected_output["start_list"]
    result_counts = benchmark(dcg.triangle_count, dg, start_list)
    # Gather the distributed result locally and align it by vertex so it can
    # be compared row-for-row against the SG result.
    result_counts = (
        result_counts.drop_duplicates()
        .compute()
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"counts": "mg_counts"})
    )
    expected_output = input_expected_output["sg_triangle_results"]
    # Update the mg triangle count with sg triangle count results
    # for easy comparison using cuDF DataFrame methods.
    result_counts["sg_counts"] = expected_output["counts"]
    counts_diffs = result_counts.query("mg_counts != sg_counts")
    assert len(counts_diffs) == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_induced_subgraph_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
from cudf.testing.testing import assert_frame_equal
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from cugraph.dask.common.mg_utils import is_single_gpu
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run the garbage collector before each test to free leftover objects."""
    gc.collect()
IS_DIRECTED = [True, False]
# Number of seed vertices to sample for the induced subgraph
NUM_SEEDS = [2, 5, 10, 20]
# FIXME: This parameter will be tested in the next release when updating the
# SG implementation
OFFSETS = [None]
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (NUM_SEEDS, "num_seeds"),
    (OFFSETS, "offsets"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    graph_file, directed, seeds, offsets = request.param
    return {
        "graph_file": graph_file,
        "directed": directed,
        "seeds": seeds,
        "offsets": offsets,
    }
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the induced_subgraph algo.
    (based on cuGraph subgraph) which can be used for validation.
    """
    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    num_seeds = input_combo["seeds"]
    # FIXME: This parameter is not tested
    # offsets= input_combo["offsets"]
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=directed, edgevals=True
    )
    # Sample k vertices from the cuGraph graph
    # FIXME: Leverage the method 'select_random_vertices' instead
    srcs = G.view_edge_list()["0"]
    dsts = G.view_edge_list()["1"]
    vertices = cudf.concat([srcs, dsts]).drop_duplicates()
    # replace=True allows duplicate seeds; int32 to match the edge dtypes
    vertices = vertices.sample(num_seeds, replace=True).astype("int32")
    # print randomly sample n seeds from the graph
    print("\nvertices: \n", vertices)
    input_combo["vertices"] = vertices
    sg_induced_subgraph, _ = cugraph.induced_subgraph(G, vertices=vertices)
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    input_combo["sg_cugraph_results"] = sg_induced_subgraph
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # Build the equivalent MG graph from the same CSV for the MG test.
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
def test_mg_induced_subgraph(dask_client, benchmark, input_expected_output):
    """MG induced_subgraph must produce the same edge list as the SG run."""
    dg = input_expected_output["MGGraph"]
    vertices = input_expected_output["vertices"]
    result_induced_subgraph = benchmark(
        dcg.induced_subgraph,
        dg,
        vertices,
        input_expected_output["offsets"],
    )
    mg_df, mg_offsets = result_induced_subgraph
    # mg_offsets = mg_offsets.compute().reset_index(drop=True)
    sg = input_expected_output["sg_cugraph_results"]
    if mg_df is not None and sg is not None:
        # FIXME: 'edges()' or 'view_edgelist()' takes half the edges out if
        # 'directed=False'.
        sg_result = sg.input_df
        # Sort both sides identically so rows line up for the comparison.
        sg_df = sg_result.sort_values(["src", "dst"]).reset_index(drop=True)
        mg_df = mg_df.compute().sort_values(["src", "dst"]).reset_index(drop=True)
        assert_frame_equal(sg_df, mg_df, check_dtype=False, check_like=True)
    else:
        # There is no edges between the vertices provided
        # FIXME: Once k-hop neighbors is implemented, find one hop neighbors
        # of all the vertices and ensure that there is None
        assert sg is None
        assert mg_df is None
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_k_truss_subgraph.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import numpy as np
import cugraph
from cugraph.testing import utils
from cugraph.datasets import polbooks, karate_asymmetric
from numba import cuda
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Collect garbage before every test to release stale objects."""
    gc.collect()
# These ground truth files have been created by running the networkx ktruss
# function on reference graphs. Currently networkx ktruss has an error such
# that nx.k_truss(G,k-2) gives the expected result for running ktruss with
# parameter k. This fix (https://github.com/networkx/networkx/pull/3713) is
# currently in networkx master and will hopefully will make it to a release
# soon.
def ktruss_ground_truth(graph_file):
    """Load a reference k-truss edge list (generated with networkx) into a
    pandas DataFrame with source/target/weight columns."""
    nx_graph = nx.read_edgelist(
        str(graph_file), nodetype=int, data=(("weight", float),)
    )
    return nx.to_pandas_edgelist(nx_graph)
def compare_k_truss(k_truss_cugraph, k, ground_truth_file):
    """Validate a cugraph k-truss subgraph against the networkx ground truth:
    same edge count, and every cugraph edge must appear (in either direction)
    with a matching weight.

    NOTE(review): the `k` parameter is unused here — the truss level is baked
    into the ground-truth file; confirm whether it should be dropped.
    """
    k_truss_nx = ktruss_ground_truth(ground_truth_file)
    edgelist_df = k_truss_cugraph.view_edge_list()
    src = edgelist_df["src"]
    dst = edgelist_df["dst"]
    wgt = edgelist_df["weight"]
    assert len(edgelist_df) == len(k_truss_nx)
    for i in range(len(src)):
        # Undirected comparison: accept the edge in either orientation.
        has_edge = (
            (k_truss_nx["source"] == src[i])
            & (k_truss_nx["target"] == dst[i])
            & np.isclose(k_truss_nx["weight"], wgt[i])
        ).any()
        has_opp_edge = (
            (k_truss_nx["source"] == dst[i])
            & (k_truss_nx["target"] == src[i])
            & np.isclose(k_truss_nx["weight"], wgt[i])
        ).any()
        assert has_edge or has_opp_edge
    return True
# Runtime CUDA version reported by numba, compared against the version on
# which ktruss is not implemented (see FIXME below).
__cuda_version = cuda.runtime.get_version()
__unsupported_cuda_version = (11, 4)
# FIXME: remove when ktruss is supported on CUDA 11.4
@pytest.mark.sg
def test_unsupported_cuda_version():
    """
    Ensures the proper exception is raised when ktruss is called in an
    unsupported env, and not when called in a supported env.
    """
    k = 5
    G = polbooks.get_graph(download=True)
    if __cuda_version != __unsupported_cuda_version:
        cugraph.k_truss(G, k)
    else:
        with pytest.raises(NotImplementedError):
            cugraph.k_truss(G, k)
@pytest.mark.sg
@pytest.mark.skipif(
    (__cuda_version == __unsupported_cuda_version),
    reason="skipping on unsupported CUDA " f"{__unsupported_cuda_version} environment.",
)
@pytest.mark.parametrize("_, nx_ground_truth", utils.DATASETS_KTRUSS)
def test_ktruss_subgraph_Graph(_, nx_ground_truth):
    """ktruss_subgraph of the polbooks graph must match the networkx
    ground-truth edge list (compare_k_truss asserts internally)."""
    k = 5
    G = polbooks.get_graph(download=True, create_using=cugraph.Graph(directed=False))
    k_subgraph = cugraph.ktruss_subgraph(G, k)
    compare_k_truss(k_subgraph, k, nx_ground_truth)
@pytest.mark.sg
@pytest.mark.skipif(
    (__cuda_version == __unsupported_cuda_version),
    reason="skipping on unsupported CUDA " f"{__unsupported_cuda_version} environment.",
)
def test_ktruss_subgraph_Graph_nx():
    """cugraph.k_truss on an nx.Graph must be isomorphic to nx.k_truss."""
    k = 5
    edge_df = utils.read_csv_for_nx(polbooks.get_path(), read_weights_in_sp=True)
    nx_graph = nx.from_pandas_edgelist(
        edge_df, source="0", target="1", edge_attr="weight", create_using=nx.Graph()
    )
    cu_subgraph = cugraph.k_truss(nx_graph, k)
    nx_subgraph = nx.k_truss(nx_graph, k)
    assert nx.is_isomorphic(cu_subgraph, nx_subgraph)
@pytest.mark.sg
@pytest.mark.skipif(
    (__cuda_version == __unsupported_cuda_version),
    reason="skipping on unsupported CUDA " f"{__unsupported_cuda_version} environment.",
)
def test_ktruss_subgraph_directed_Graph():
    """k_truss must reject a directed graph with ValueError."""
    G = karate_asymmetric.get_graph(
        download=True,
        create_using=cugraph.Graph(directed=True),
        ignore_weights=False,  # keep edge weights (edgevals)
    )
    with pytest.raises(ValueError):
        cugraph.k_truss(G, 5)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_leiden_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
# Prefer the rapids_pytest_benchmark helper; fall back to plain
# pytest_benchmark with a no-op param-naming shim when it is unavailable.
try:
    from rapids_pytest_benchmark import setFixtureParamNames
except ImportError:
    print(
        "\n\nWARNING: rapids_pytest_benchmark is not installed, "
        "falling back to pytest_benchmark fixtures.\n"
    )
    # if rapids_pytest_benchmark is not available, just perform time-only
    # benchmarking and replace the util functions with nops
    import pytest_benchmark
    gpubenchmark = pytest_benchmark.plugin.benchmark
    def setFixtureParamNames(*args, **kwargs):
        # No-op stand-in for the rapids_pytest_benchmark helper.
        pass
# =============================================================================
# Parameters
# =============================================================================
# Asymmetric (effectively directed) dataset used to verify rejection below
DATASETS_ASYMMETRIC = [utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate-asymmetric.csv"]
###############################################################################
# Fixtures
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.fixture(
    scope="module",
    params=DATASETS_ASYMMETRIC,
    ids=[f"dataset={d.as_posix()}" for d in DATASETS_ASYMMETRIC],
)
def daskGraphFromDataset(request, dask_client):
    """
    Build and return a directed MG Graph from the dataset file param.
    """
    # Parameterized fixtures do not assign param names to param values, so
    # call the helper manually.
    setFixtureParamNames(request, ["dataset"])
    dataset = request.param
    edge_list = dask_cudf.read_csv(
        dataset,
        chunksize=dcg.get_chunksize(dataset),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    graph = cugraph.Graph(directed=True)
    graph.from_dask_cudf_edgelist(edge_list, "src", "dst", "value")
    return graph
@pytest.fixture(
    scope="module",
    params=utils.DATASETS_UNDIRECTED,
    ids=[f"dataset={d.as_posix()}" for d in utils.DATASETS_UNDIRECTED],
)
def uddaskGraphFromDataset(request, dask_client):
    """
    Build and return an undirected MG Graph from the dataset file param.
    """
    # Parameterized fixtures do not assign param names to param values, so
    # call the helper manually.
    setFixtureParamNames(request, ["dataset"])
    dataset = request.param
    edge_list = dask_cudf.read_csv(
        dataset,
        chunksize=dcg.get_chunksize(dataset),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    graph = cugraph.Graph(directed=False)
    graph.from_dask_cudf_edgelist(edge_list, "src", "dst", "value")
    return graph
###############################################################################
# Tests
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
# FIXME: Implement more robust tests
@pytest.mark.mg
def test_mg_leiden_with_edgevals_directed_graph(daskGraphFromDataset):
    """Leiden must reject directed MG graphs with a ValueError."""
    with pytest.raises(ValueError):
        dcg.leiden(daskGraphFromDataset)
###############################################################################
# Tests
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
# FIXME: Implement more robust tests
@pytest.mark.mg
def test_mg_leiden_with_edgevals_undirected_graph(uddaskGraphFromDataset):
    """Smoke-test MG Leiden on an undirected graph."""
    parts, mod = dcg.leiden(uddaskGraphFromDataset)
    # FIXME: either call Nx with the same dataset and compare results, or
    # hardcode golden results to compare to.
    print("", parts.compute(), mod, "", sep="\n")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_louvain_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cugraph.dask as dcg
import cugraph
import dask_cudf
from cugraph.testing import utils
# Prefer the rapids_pytest_benchmark helper; fall back to plain
# pytest_benchmark with a no-op param-naming shim when it is unavailable.
try:
    from rapids_pytest_benchmark import setFixtureParamNames
except ImportError:
    print(
        "\n\nWARNING: rapids_pytest_benchmark is not installed, "
        "falling back to pytest_benchmark fixtures.\n"
    )
    # if rapids_pytest_benchmark is not available, just perform time-only
    # benchmarking and replace the util functions with nops
    import pytest_benchmark
    gpubenchmark = pytest_benchmark.plugin.benchmark
    def setFixtureParamNames(*args, **kwargs):
        # No-op stand-in for the rapids_pytest_benchmark helper.
        pass
# =============================================================================
# Parameters
# =============================================================================
# Asymmetric (effectively directed) dataset used to verify rejection below
DATASETS_ASYMMETRIC = [utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate-asymmetric.csv"]
###############################################################################
# Fixtures
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.fixture(
    scope="module",
    params=DATASETS_ASYMMETRIC,
    ids=[f"dataset={d.as_posix()}" for d in DATASETS_ASYMMETRIC],
)
def daskGraphFromDataset(request, dask_client):
    """
    Build and return a directed MG Graph from the dataset file param.
    """
    # Parameterized fixtures do not assign param names to param values, so
    # call the helper manually.
    setFixtureParamNames(request, ["dataset"])
    dataset = request.param
    edge_list = dask_cudf.read_csv(
        dataset,
        chunksize=dcg.get_chunksize(dataset),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    graph = cugraph.Graph(directed=True)
    graph.from_dask_cudf_edgelist(edge_list, "src", "dst", "value")
    return graph
@pytest.fixture(
    scope="module",
    params=utils.DATASETS_UNDIRECTED,
    ids=[f"dataset={d.as_posix()}" for d in utils.DATASETS_UNDIRECTED],
)
def uddaskGraphFromDataset(request, dask_client):
    """
    Build and return an undirected MG Graph from the dataset file param.
    """
    # Parameterized fixtures do not assign param names to param values, so
    # call the helper manually.
    setFixtureParamNames(request, ["dataset"])
    dataset = request.param
    edge_list = dask_cudf.read_csv(
        dataset,
        chunksize=dcg.get_chunksize(dataset),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    graph = cugraph.Graph(directed=False)
    graph.from_dask_cudf_edgelist(edge_list, "src", "dst", "value")
    return graph
###############################################################################
# Tests
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
# FIXME: Implement more robust tests
@pytest.mark.mg
def test_mg_louvain_with_edgevals_directed_graph(daskGraphFromDataset):
    """Louvain must reject directed MG graphs with a ValueError."""
    with pytest.raises(ValueError):
        dcg.louvain(daskGraphFromDataset)
###############################################################################
# Tests
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
# FIXME: Implement more robust tests
@pytest.mark.mg
def test_mg_louvain_with_edgevals_undirected_graph(uddaskGraphFromDataset):
    """Smoke-test MG Louvain on an undirected graph."""
    parts, mod = dcg.louvain(uddaskGraphFromDataset)
    # FIXME: either call Nx with the same dataset and compare results, or
    # hardcode golden results to compare to.
    print("", parts.compute(), mod, "", sep="\n")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_leiden.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import time
import pytest
import networkx as nx
import cugraph
import cudf
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cugraph.datasets import karate_asymmetric
# =============================================================================
# Test data
# =============================================================================
# Golden-result cases: each entry supplies a small input graph (as COO
# edge arrays or a CSR offset/index pair), Leiden parameters, and the
# expected partition labels plus modularity score.
_test_data = {
    "data_1": {
        "graph": {
            "src_or_offset_array": [0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5],
            "dst_or_index_array": [1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4],
            # fmt: off
            "weight": [0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2, 0.1, 2.1, 1.1, 5.1,
                       3.1, 4.1, 7.2, 3.2],
            # fmt: on
        },
        "max_level": 10,
        "resolution": 1.0,
        "input_type": "COO",
        "expected_output": {
            "partition": [0, 0, 0, 1, 1, 1],
            "modularity_score": 0.215969,
        },
    },
    "data_2": {
        "graph": {
            # fmt: off
            "src_or_offset_array": [0, 16, 25, 35, 41, 44, 48, 52, 56, 61, 63, 66,
                                    67, 69, 74, 76, 78, 80, 82, 84, 87, 89, 91, 93,
                                    98, 101, 104, 106, 110, 113, 117, 121, 127, 139,
                                    156],
            "dst_or_index_array": [1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 17, 19, 21,
                                   31, 0, 2, 3, 7, 13, 17, 19, 21, 30, 0, 1, 3, 7, 8,
                                   9, 13, 27, 28, 32, 0, 1, 2, 7, 12, 13, 0, 6, 10, 0,
                                   6, 10, 16, 0, 4, 5, 16, 0, 1, 2, 3, 0, 2, 30, 32,
                                   33, 2, 33, 0, 4, 5, 0, 0, 3, 0, 1, 2, 3, 33, 32, 33,
                                   32, 33, 5, 6, 0, 1, 32, 33, 0, 1, 33, 32, 33, 0, 1,
                                   32, 33, 25, 27, 29, 32, 33, 25, 27, 31, 23, 24, 31,
                                   29, 33, 2, 23, 24, 33, 2, 31, 33, 23, 26, 32, 33, 1,
                                   8, 32, 33, 0, 24, 25, 28, 32, 33, 2, 8, 14, 15, 18,
                                   20, 22, 23, 29, 30, 31, 33, 8, 9, 13, 14, 15, 18, 19,
                                   20, 22, 23, 26, 27, 28, 29, 30, 31, 32],
            "weight": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
                       1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            # fmt: on
        },
        "max_level": 40,
        "resolution": 1.0,
        "input_type": "CSR",
        "expected_output": {
            # fmt: off
            "partition": [3, 3, 3, 3, 2, 2, 2, 3, 1, 3, 2, 3, 3, 3, 1, 1, 2, 3, 1, 3,
                          1, 3, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1],
            # fmt: on
            "modularity_score": 0.41880345,
        },
    },
}
# =============================================================================
# Pytest fixtures
# =============================================================================
@pytest.fixture(
    scope="module",
    params=[pytest.param(value, id=key) for (key, value) in _test_data.items()],
)
def input_and_expected_output(request):
    """Build a Graph from one _test_data entry (COO or CSR form), run Leiden
    on it, and return the expected and actual outputs for comparison."""
    d = request.param.copy()
    input_graph_data = d.pop("graph")
    input_type = d.pop("input_type")
    src_or_offset_array = cudf.Series(
        input_graph_data["src_or_offset_array"], dtype="int32"
    )
    dst_or_index_array = cudf.Series(
        input_graph_data["dst_or_index_array"], dtype="int32"
    )
    weight = cudf.Series(input_graph_data["weight"], dtype="float32")
    max_level = d.pop("max_level")
    resolution = d.pop("resolution")
    # After the pops, d holds only "expected_output"
    output = d
    G = cugraph.Graph()
    if input_type == "COO":
        # Create graph from an edgelist
        df = cudf.DataFrame()
        df["src"] = src_or_offset_array
        df["dst"] = dst_or_index_array
        df["weight"] = cudf.Series(weight, dtype="float32")
        G.from_cudf_edgelist(
            df,
            source="src",
            destination="dst",
            edge_attr="weight",
            store_transposed=False,
        )
    elif input_type == "CSR":
        # Create graph from csr
        offsets = src_or_offset_array
        indices = dst_or_index_array
        G.from_cudf_adjlist(offsets, indices, weight, renumber=False)
    parts, mod = cugraph.leiden(G, max_level, resolution)
    # Sort by vertex so the partition lines up with the expected labels
    parts = parts.sort_values("vertex").reset_index(drop=True)
    output["result_output"] = {"partition": parts["partition"], "modularity_score": mod}
    return output
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Trigger garbage collection before each test runs."""
    gc.collect()
def cugraph_leiden(G):
    """Run cugraph Leiden on G, printing the elapsed wall time, and return
    (partition DataFrame, modularity score)."""
    start = time.time()
    parts, mod = cugraph.leiden(G)
    elapsed = time.time() - start
    print("Cugraph Leiden Time : " + str(elapsed))
    return parts, mod
def cugraph_louvain(G):
    """Run cugraph Louvain on G, printing the elapsed wall time, and return
    (partition DataFrame, modularity score)."""
    start = time.time()
    parts, mod = cugraph.louvain(G)
    elapsed = time.time() - start
    print("Cugraph Louvain Time : " + str(elapsed))
    return parts, mod
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_leiden(graph_file):
    """Leiden modularity must reach at least 75% of Louvain's."""
    G = graph_file.get_graph(ignore_weights=False)
    _, leiden_mod = cugraph_leiden(G)
    _, louvain_mod = cugraph_louvain(G)
    # Leiden modularity score is smaller than Louvain's
    assert leiden_mod >= (0.75 * louvain_mod)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_leiden_nx(graph_file):
    """Leiden via the NetworkX compatibility path must reach at least 75% of
    Louvain's modularity."""
    edge_df = utils.read_csv_for_nx(graph_file.get_path())
    nx_graph = nx.from_pandas_edgelist(
        edge_df, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )
    _, leiden_mod = cugraph_leiden(nx_graph)
    _, louvain_mod = cugraph_louvain(nx_graph)
    # Leiden modularity score is smaller than Louvain's
    assert leiden_mod >= (0.75 * louvain_mod)
@pytest.mark.sg
def test_leiden_directed_graph():
    """Leiden must reject a directed graph by raising ValueError."""
    directed_graph = karate_asymmetric.get_graph(
        create_using=cugraph.Graph(directed=True), ignore_weights=False
    )
    with pytest.raises(ValueError):
        cugraph_leiden(directed_graph)
@pytest.mark.sg
def test_leiden_golden_results(input_and_expected_output):
    """Compare a Leiden run against stored golden partition/modularity data."""
    expected = input_and_expected_output["expected_output"]
    result = input_and_expected_output["result_output"]
    # Modularity must match the golden value to four decimal places.
    assert abs(expected["modularity_score"] - result["modularity_score"]) < 0.0001
    # Partition labels may be permuted between runs; only require a
    # consistent expected-label -> result-label mapping.
    label_map = {}
    for exp_label, res_label in zip(
        expected["partition"], list(result["partition"].to_pandas())
    ):
        assert label_map.setdefault(exp_label, res_label) == res_label
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_triangle_count.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import networkx as nx
import pytest
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cugraph.datasets import karate_asymmetric
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: reclaim memory from the previous test."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# All undirected test datasets; triangle counting requires undirected graphs.
datasets = UNDIRECTED_DATASETS
# Cartesian product of dataset x edgevals x start_list; the declaration
# order here is the order of values in request.param for input_combo.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([True, False], "edgevals"),
    ([True, False], "start_list"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    This fixture returns a dictionary containing all input params required to
    run a Triangle Count algo
    """
    graph_file, edgevals, start_list = request.param
    parameters = {
        "graph_file": graph_file,
        "edgevals": edgevals,
        "start_list": start_list,
    }
    input_data_path = graph_file.get_path()
    # cugraph graph and the equivalent NetworkX graph for result comparison
    parameters["G"] = graph_file.get_graph(ignore_weights=not edgevals)
    parameters["Gnx"] = utils.generate_nx_graph_from_file(
        input_data_path, directed=False, edgevals=edgevals
    )
    return parameters
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
def test_triangles(input_combo):
    """Per-vertex triangle counts from cugraph must match NetworkX."""
    G = input_combo["G"]
    Gnx = input_combo["Gnx"]
    if input_combo["start_list"]:
        # Restrict the computation to a random subset of 1-10 vertices.
        num_seeds = random.randint(1, 10)
        seeds = random.sample(list(Gnx.nodes()), num_seeds)
    else:
        seeds = None
    cugraph_df = (
        cugraph.triangle_count(G, seeds)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"counts": "cugraph_counts"})
    )
    nx_counts = nx.triangles(Gnx, seeds)
    nx_df = cudf.DataFrame()
    nx_df["vertex"] = nx_counts.keys()
    nx_df["counts"] = nx_counts.values()
    nx_df = nx_df.sort_values("vertex").reset_index(drop=True)
    # Align the two result frames on vertex order and compare per row.
    cugraph_df["nx_counts"] = nx_df["counts"]
    mismatches = cugraph_df.query("nx_counts != cugraph_counts")
    assert len(mismatches) == 0
@pytest.mark.sg
def test_triangles_int64(input_combo):
    """Triangle counting must give the same total when the edgelist uses
    int64 vertex ids instead of the default int32.

    NOTE(review): count_legacy_32 comes from the NetworkX-input code path
    of cugraph.triangle_count; the final assertion compares the int64
    cudf-path sum against that result — confirm both paths return a
    comparable scalar/total.
    """
    Gnx = input_combo["Gnx"]
    count_legacy_32 = cugraph.triangle_count(Gnx)
    graph_file = input_combo["graph_file"]
    G = graph_file.get_graph()
    # Force 64-bit vertex ids to exercise the int64 code path.
    G.edgelist.edgelist_df = G.edgelist.edgelist_df.astype(
        {"src": "int64", "dst": "int64"}
    )
    count_exp_64 = (
        cugraph.triangle_count(G)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"counts": "exp_cugraph_counts"})
    )
    cugraph_exp_triangle_results = count_exp_64["exp_cugraph_counts"].sum()
    # The astype above must have taken effect before the count ran.
    assert G.edgelist.edgelist_df["src"].dtype == "int64"
    assert G.edgelist.edgelist_df["dst"].dtype == "int64"
    assert cugraph_exp_triangle_results == count_legacy_32
@pytest.mark.sg
def test_triangles_no_weights(input_combo):
    """Triangle counts must be identical whether or not the graph carries
    edge weights (weights are irrelevant to triangle counting).

    NOTE(review): count_legacy is a dataframe while the final assertion
    compares it against a scalar sum — verify the NetworkX-input path's
    return type makes this comparison meaningful.
    """
    G_weighted = input_combo["Gnx"]
    # Counts computed from the (weighted) NetworkX graph input path.
    count_legacy = (
        cugraph.triangle_count(G_weighted)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"counts": "exp_cugraph_counts"})
    )
    graph_file = input_combo["graph_file"]
    # Same dataset loaded without weights.
    G = graph_file.get_graph(ignore_weights=True)
    assert G.is_weighted() is False
    triangle_count = (
        cugraph.triangle_count(G)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"counts": "exp_cugraph_counts"})
    )
    cugraph_exp_triangle_results = triangle_count["exp_cugraph_counts"].sum()
    assert cugraph_exp_triangle_results == count_legacy
@pytest.mark.sg
def test_triangles_directed_graph():
    """triangle_count must reject directed graphs with a ValueError."""
    M = utils.read_csv_for_nx(karate_asymmetric.get_path())
    edges = cudf.DataFrame()
    edges["src"] = cudf.Series(M["0"])
    edges["dst"] = cudf.Series(M["1"])
    edges["weights"] = cudf.Series(M["weight"])
    directed_graph = cugraph.Graph(directed=True)
    directed_graph.from_cudf_edgelist(
        edges, source="src", destination="dst", edge_attr="weights"
    )
    with pytest.raises(ValueError):
        cugraph.triangle_count(directed_graph)
# FIXME: Remove this test once experimental.triangle count is removed
@pytest.mark.sg
def test_experimental_triangle_count(input_combo):
    """The deprecated experimental.triangle_count entry point must emit a
    warning when called (it forwards to the stable implementation)."""
    G = input_combo["G"]
    with pytest.warns(Warning):
        cugraph.experimental.triangle_count(G)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_balanced_cut.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import networkx as nx
import pandas as pd
import cudf
import cugraph
from cugraph.testing import DEFAULT_DATASETS
def cugraph_call(G, partitions):
    """Run spectral balanced-cut clustering on ``G`` with ``partitions``
    clusters and return ``(set of vertex ids, edge-cut score)``."""
    df = cugraph.spectralBalancedCutClustering(
        G, partitions, num_eigen_vects=partitions
    )
    score = cugraph.analyzeClustering_edge_cut(G, partitions, df, "vertex", "cluster")
    return set(df["vertex"].to_numpy()), score
def random_call(G, partitions):
    """Baseline for clustering quality: assign every vertex to a random
    cluster and average the edge-cut score over several trials.

    Returns ``(set of vertex ids, mean edge-cut score)``.
    """
    random.seed(0)  # deterministic baseline across runs
    num_verts = G.number_of_vertices()
    num_repeats = 20
    score = 0.0
    for _ in range(num_repeats):
        assignment = [random.randint(0, partitions - 1) for _ in range(num_verts)]
        assign_cu = cudf.DataFrame(assignment, columns=["cluster"])
        assign_cu["vertex"] = assign_cu.index
        assign_cu = assign_cu.astype("int32")
        score += cugraph.analyzeClustering_edge_cut(G, partitions, assign_cu)
    # BUG FIX: the accumulated score was previously divided by 10 even
    # though 20 trials are run; divide by the actual trial count so the
    # returned value is a true mean.
    return set(range(num_verts)), (score / num_repeats)
# Cluster counts to request from the spectral partitioner in each test.
PARTITIONS = [2, 4, 8]
# Test all combinations of default/managed and pooled/non-pooled allocation
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_edge_cut_clustering(graph_file, partitions):
    """Spectral balanced-cut must beat random assignment on edge-cut score
    for an unweighted graph."""
    gc.collect()
    unweighted_graph = graph_file.get_graph(ignore_weights=True)
    # Edge-cut score for the spectral partitioning vs. a random baseline.
    _, spectral_score = cugraph_call(unweighted_graph, partitions)
    _, random_score = random_call(unweighted_graph, partitions)
    dataset_name = graph_file.metadata["name"]
    print("graph_file = ", dataset_name, ", partitions = ", partitions)
    print(spectral_score, random_score)
    # Lower edge-cut is better; the spectral result must win.
    assert spectral_score < random_score
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_edge_cut_clustering_with_edgevals(graph_file, partitions):
    """Spectral balanced-cut must beat random assignment when the graph
    carries float64 edge weights."""
    gc.collect()
    weighted_graph = graph_file.get_graph()
    # Promote the weight column to float64 to exercise the 64-bit path.
    edgelist = weighted_graph.edgelist.edgelist_df
    edgelist["weights"] = edgelist["weights"].astype("float64")
    _, spectral_score = cugraph_call(weighted_graph, partitions)
    _, random_score = random_call(weighted_graph, partitions)
    print(spectral_score, random_score)
    # Lower edge-cut is better; the spectral result must win.
    assert spectral_score < random_score
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("partitions", PARTITIONS)
def test_edge_cut_clustering_with_edgevals_nx(graph_file, partitions):
    """Spectral balanced-cut on a NetworkX input must beat random
    assignment on the edge-cut score.

    Cleanups vs. previous revision: removed commented-out dead code, an
    unused ``df = set(...)`` local, and the unused first element of
    ``random_call``'s return value.
    """
    gc.collect()
    G = graph_file.get_graph()
    NM = G.to_pandas_edgelist().rename(
        columns={"src": "0", "dst": "1", "wgt": "weight"}
    )
    G = nx.from_pandas_edgelist(
        NM, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )
    # Get the edge_cut score for partitioning versus random assignment
    df = cugraph.spectralBalancedCutClustering(
        G, partitions, num_eigen_vects=partitions
    )
    # The NetworkX input path returns a dict; convert it to an int32 cudf
    # frame for analyzeClustering_edge_cut.
    pdf = pd.DataFrame.from_dict(df, orient="index").reset_index()
    pdf.columns = ["vertex", "cluster"]
    gdf = cudf.from_pandas(pdf).astype("int32")
    cu_score = cugraph.analyzeClustering_edge_cut(
        G, partitions, gdf, "vertex", "cluster"
    )
    Gcu = cugraph.utilities.convert_from_nx(G)
    _, rand_score = random_call(Gcu, partitions)
    # Lower edge-cut is better; the spectral result must win.
    print(cu_score, rand_score)
    assert cu_score < rand_score
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/community/test_ecg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cugraph
from cugraph.testing import utils
from cugraph.datasets import karate, dolphins, netscience
def cugraph_call(G, min_weight, ensemble_size):
    """Run ECG clustering on ``G`` and return
    ``(modularity score, number of partitions found)``."""
    df = cugraph.ecg(G, min_weight, ensemble_size)
    # Partition ids are 0-based, so max+1 is the partition count.
    num_parts = df["partition"].max() + 1
    score = cugraph.analyzeClustering_modularity(
        G, num_parts, df, "vertex", "partition"
    )
    return score, num_parts
def golden_call(filename):
    """Return the stored golden modularity score for a known dataset name,
    or ``None`` for an unrecognized name."""
    golden_scores = {
        "dolphins": 0.4962422251701355,
        "karate": 0.38428664207458496,
        "netscience": 0.9279554486274719,
    }
    return golden_scores.get(filename)
# Datasets with known golden modularity scores (see golden_call above).
DATASETS = [karate, dolphins, netscience]
# ECG minimum edge-weight thresholds to exercise.
MIN_WEIGHTS = [0.05, 0.10, 0.15]
# ECG ensemble sizes to exercise.
ENSEMBLE_SIZES = [16, 32]
@pytest.mark.sg
@pytest.mark.parametrize("dataset", DATASETS)
@pytest.mark.parametrize("min_weight", MIN_WEIGHTS)
@pytest.mark.parametrize("ensemble_size", ENSEMBLE_SIZES)
def test_ecg_clustering(dataset, min_weight, ensemble_size):
    """ECG modularity must come within 5% of the stored golden score."""
    gc.collect()
    G = dataset.get_graph()
    # Promote weights to float64 to exercise the 64-bit code path.
    edgelist = G.edgelist.edgelist_df
    edgelist["weights"] = edgelist["weights"].astype("float64")
    cu_score, _ = cugraph_call(G, min_weight, ensemble_size)
    golden_score = golden_call(dataset.metadata["name"])
    # Allow the computed modularity to fall at most 5% below golden.
    assert cu_score > (0.95 * golden_score)
@pytest.mark.sg
@pytest.mark.parametrize("dataset", DATASETS)
@pytest.mark.parametrize("min_weight", MIN_WEIGHTS)
@pytest.mark.parametrize("ensemble_size", ENSEMBLE_SIZES)
def test_ecg_clustering_nx(dataset, min_weight, ensemble_size):
    """ECG on a NetworkX input graph must return a dict result."""
    gc.collect()
    edges = utils.read_csv_for_nx(dataset.get_path(), read_weights_in_sp=True)
    nx_graph = nx.from_pandas_edgelist(
        edges, source="0", target="1", edge_attr="weight", create_using=nx.Graph()
    )
    result = cugraph.ecg(nx_graph, min_weight, ensemble_size, "weight")
    assert isinstance(result, dict)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/internals/test_renumber_mg.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file test the Renumbering features
import gc
import pytest
import pandas
import numpy as np
import dask
import cudf
import dask_cudf
import cugraph.dask as dcg
import cugraph
from cugraph.testing import utils
from cugraph.structure.number_map import NumberMap
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
from cudf.testing import assert_frame_equal, assert_series_equal
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: reclaim memory from the previous test."""
    gc.collect()
# Graph-direction flags used to parametrize directed/undirected runs.
IS_DIRECTED = [True, False]
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize(
    "graph_file",
    utils.DATASETS_UNRENUMBERED,
    ids=[f"dataset={d.as_posix()}" for d in utils.DATASETS_UNRENUMBERED],
)
def test_mg_renumber(graph_file, dask_client):
    """Round-trip test: multi-column renumber followed by unrenumber (both
    src and dst) must reproduce the original multi-column vertex ids."""
    M = utils.read_csv_for_nx(graph_file)
    sources = cudf.Series(M["0"])
    destinations = cudf.Series(M["1"])
    # Offset added so the "new" id columns differ from the "old" ones,
    # giving each vertex a 2-column external id.
    translate = 1000
    gdf = cudf.DataFrame()
    gdf["src_old"] = sources
    gdf["dst_old"] = destinations
    gdf["src"] = sources + translate
    gdf["dst"] = destinations + translate
    ddf = dask.dataframe.from_pandas(
        gdf, npartitions=len(dask_client.scheduler_info()["workers"])
    )
    # preserve_order is not supported for MG
    renumbered_df, renumber_map = NumberMap.renumber(
        ddf, ["src", "src_old"], ["dst", "dst_old"], preserve_order=False
    )
    # Unrenumber src then dst back to the original 2-column ids.
    unrenumbered_df = renumber_map.unrenumber(
        renumbered_df, renumber_map.renumbered_src_col_name, preserve_order=False
    )
    unrenumbered_df = renumber_map.unrenumber(
        unrenumbered_df, renumber_map.renumbered_dst_col_name, preserve_order=False
    )
    # sort needed only for comparisons, since preserve_order is False
    gdf = gdf.sort_values(by=["src", "src_old", "dst", "dst_old"])
    gdf = gdf.reset_index()
    unrenumbered_df = unrenumbered_df.compute()
    src = renumber_map.renumbered_src_col_name
    dst = renumber_map.renumbered_dst_col_name
    # Unrenumbered multi-columns come back as "0_<col>"/"1_<col>" pairs.
    unrenumbered_df = unrenumbered_df.sort_values(
        by=[f"0_{src}", f"1_{src}", f"0_{dst}", f"1_{dst}"]
    )
    unrenumbered_df = unrenumbered_df.reset_index()
    assert_series_equal(gdf["src"], unrenumbered_df[f"0_{src}"], check_names=False)
    assert_series_equal(gdf["src_old"], unrenumbered_df[f"1_{src}"], check_names=False)
    assert_series_equal(gdf["dst"], unrenumbered_df[f"0_{dst}"], check_names=False)
    assert_series_equal(gdf["dst_old"], unrenumbered_df[f"1_{dst}"], check_names=False)
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize(
    "graph_file",
    utils.DATASETS_UNRENUMBERED,
    ids=[f"dataset={d.as_posix()}" for d in utils.DATASETS_UNRENUMBERED],
)
def test_mg_renumber_add_internal_vertex_id(graph_file, dask_client):
    """add_internal_vertex_id on a multi-column renumber map must not raise."""
    edge_df = utils.read_csv_for_nx(graph_file)
    src = cudf.Series(edge_df["0"])
    dst = cudf.Series(edge_df["1"])
    offset = 1000
    gdf = cudf.DataFrame()
    gdf["src_old"] = src
    gdf["dst_old"] = dst
    gdf["src"] = src + offset
    gdf["dst"] = dst + offset
    gdf["weight"] = gdf.index.astype(np.float64)
    num_workers = len(dask_client.scheduler_info()["workers"])
    ddf = dask.dataframe.from_pandas(gdf, npartitions=num_workers)
    _, renumber_map = NumberMap.renumber(ddf, ["src", "src_old"], ["dst", "dst_old"])
    sample = gdf[["src", "src_old"]].head()
    # simply check that this does not raise an exception
    renumber_map.add_internal_vertex_id(
        sample, renumber_map.renumbered_src_col_name, ["src", "src_old"]
    )
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_pagerank(dask_client, directed):
    """Single-GPU and multi-GPU pagerank must agree within tolerance."""
    pandas.set_option("display.max_rows", 10000)
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    chunksize = dcg.get_chunksize(input_data_path)
    read_kwargs = dict(
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    ddf = dask_cudf.read_csv(input_data_path, chunksize=chunksize, **read_kwargs)
    df = cudf.read_csv(input_data_path, **read_kwargs)
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst")
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst")
    expected_pr = cugraph.pagerank(g)
    result_pr = dcg.pagerank(dg).compute()
    assert len(expected_pr) == len(result_pr)
    tol = 1.0e-05
    compare_pr = expected_pr.merge(result_pr, on="vertex", suffixes=["_local", "_dask"])
    # Count vertices whose SG and MG scores differ beyond tolerance
    # (vectorized; replaces the per-row iloc loop with identical result).
    diffs = (compare_pr["pagerank_local"] - compare_pr["pagerank_dask"]).abs()
    err = int((diffs > tol * 1.1).sum())
    print("Mismatches:", err)
    assert err == 0
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize(
    "graph_file",
    utils.DATASETS_UNRENUMBERED,
    ids=[f"dataset={d.as_posix()}" for d in utils.DATASETS_UNRENUMBERED],
)
def test_mg_renumber_common_col_names(graph_file, dask_client):
    """
    Ensure that commonly-used column names in the input do not conflict with
    names used internally by NumberMap.
    """
    M = utils.read_csv_for_nx(graph_file)
    sources = cudf.Series(M["0"])
    destinations = cudf.Series(M["1"])
    numbers = range(len(sources))
    offset_numbers = [n + 1 for n in numbers]
    floats = [float(n) for n in numbers]
    # test multi-column ("legacy" renumbering code path)
    # "src"/"dst" are deliberately plain data columns here; the actual
    # vertex id columns are col_a..col_d.
    gdf = cudf.DataFrame(
        {
            "src": numbers,
            "dst": numbers,
            "weights": floats,
            "col_a": sources,
            "col_b": sources,
            "col_c": destinations,
            "col_d": destinations,
        }
    )
    ddf = dask.dataframe.from_pandas(
        gdf, npartitions=len(dask_client.scheduler_info()["workers"])
    )
    renumbered_df, renumber_map = NumberMap.renumber(
        ddf, ["col_a", "col_b"], ["col_c", "col_d"]
    )
    # NumberMap must pick internal names that avoid the existing columns.
    assert renumber_map.renumbered_src_col_name != "src"
    assert renumber_map.renumbered_dst_col_name != "dst"
    assert renumber_map.renumbered_src_col_name in renumbered_df.columns
    assert renumber_map.renumbered_dst_col_name in renumbered_df.columns
    # test experimental renumbering code path
    gdf = cudf.DataFrame(
        {
            "src": numbers,
            "dst": offset_numbers,
            "weights": floats,
            "col_a": sources,
            "col_b": destinations,
        }
    )
    ddf = dask.dataframe.from_pandas(
        gdf, npartitions=len(dask_client.scheduler_info()["workers"])
    )
    renumbered_df, renumber_map = NumberMap.renumber(ddf, "col_a", "col_b")
    assert renumber_map.renumbered_src_col_name != "src"
    assert renumber_map.renumbered_dst_col_name != "dst"
    assert renumber_map.renumbered_src_col_name in renumbered_df.columns
    assert renumber_map.renumbered_dst_col_name in renumbered_df.columns
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
def test_pagerank_string_vertex_ids(dask_client):
    """
    Ensures string vertex IDs can be used.
    Note: the dask_client fixture sets up and tears down a LocalCUDACluster.
    See ../conftest.py
    """
    # Build a small edgelist with string vertex ids directly in cudf; the
    # same frame feeds both the SG and MG code paths. (A previous comment
    # claimed a pandas/to_csv round-trip, which this code does not do.)
    df = cudf.DataFrame(
        {
            "src": ["a1", "a1", "a2", "a3"],
            "dst": ["a2", "a3", "a4", "a4"],
        }
    )
    # SG
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst")
    sg_results = cugraph.pagerank(G)
    sg_results = sg_results.sort_values("vertex").reset_index(drop=True)
    # MG
    ddf = dask_cudf.from_cudf(df, npartitions=2)
    G_dask = cugraph.Graph(directed=True)
    G_dask.from_dask_cudf_edgelist(ddf, source="src", destination="dst")
    mg_results = dcg.pagerank(G_dask)
    # Organize results for easy comparison, this does not change the values. MG
    # Pagerank defaults to float64, so convert to float32 when comparing to SG
    mg_results = mg_results.compute().sort_values("vertex").reset_index(drop=True)
    mg_results["pagerank"] = mg_results["pagerank"].astype("float32")
    assert_frame_equal(sg_results, mg_results)
@pytest.mark.mg
@pytest.mark.parametrize("dtype", ["int32", "int64"])
def test_mg_renumber_multi_column(dtype, dask_client):
    """Renumbering multi-column vertex ids must not widen edgelist dtypes."""
    df = cudf.DataFrame(
        {"src_a": list(range(0, 10)), "dst_a": list(range(10, 20))}
    ).astype(dtype)
    df["src_b"] = df["src_a"] + 10
    df["dst_b"] = df["dst_a"] + 20
    ddf = dask_cudf.from_cudf(df, npartitions=2)
    original_dtypes = list(ddf.dtypes)
    G = cugraph.Graph()
    G.from_dask_cudf_edgelist(
        ddf, source=["src_a", "src_b"], destination=["dst_a", "dst_b"]
    )
    renumbered_dtypes = list(G.edgelist.edgelist_df.dtypes)
    # The renumbered edgelist must use only dtypes from the input.
    assert set(renumbered_dtypes).issubset(set(original_dtypes))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/internals/test_symmetrize.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import pandas as pd
import cudf
import cugraph
from cugraph.testing import DEFAULT_DATASETS
@pytest.mark.sg
def test_version():
    """Smoke test: the cugraph package exposes a __version__ attribute."""
    gc.collect()
    cugraph.__version__
def compare(src1, dst1, val1, src2, dst2, val2):
    """Validate that edgelist 2 is a correct symmetrization of edgelist 1.

    Using dataframe joins, asserts that:
      * every original (u, v) edge still exists in the symmetrized data,
      * the reverse (v, u) of every original edge exists,
      * when values are given, each symmetrized value matches the original
        value in one direction or the other,
      * no edges were invented that lack a counterpart (in either
        direction) in the original data.

    val1/val2 may be None for unweighted comparisons.
    """
    #
    # We will do comparison computations by using dataframe
    # merge functions (essentially doing fast joins). We
    # start by making two data frames
    #
    df1 = cudf.DataFrame()
    df1["src1"] = src1
    df1["dst1"] = dst1
    if val1 is not None:
        df1["val1"] = val1
    df2 = cudf.DataFrame()
    df2["src2"] = src2
    df2["dst2"] = dst2
    if val2 is not None:
        df2["val2"] = val2
    #
    # Check to see if all pairs in the original data frame
    # still exist in the new data frame. If we join (merge)
    # the data frames where (src1[i]=src2[i]) and (dst1[i]=dst2[i])
    # then we should get exactly the same number of entries in
    # the data frame if we did not lose any data.
    #
    join = df1.merge(df2, left_on=["src1", "dst1"], right_on=["src2", "dst2"])
    if len(df1) != len(join):
        # Mismatch: print the rows of df1 with no match in df2 for debugging.
        join2 = df1.merge(
            df2, how="left", left_on=["src1", "dst1"], right_on=["src2", "dst2"]
        )
        pd.set_option("display.max_rows", 500)
        print("df1 = \n", df1.sort_values(["src1", "dst1"]))
        print("df2 = \n", df2.sort_values(["src2", "dst2"]))
        print(
            "join2 = \n",
            join2.sort_values(["src1", "dst1"])
            .to_pandas()
            .query("src2.isnull()", engine="python"),
        )
    assert len(df1) == len(join)
    if val1 is not None:
        #
        # Check the values. In this join, if val1 and val2 are
        # the same then we are good. If they are different then
        # we need to check if the value is selected from the opposite
        # direction, so we'll merge with the edges reversed and
        # check to make sure that the values all match
        #
        diffs = join.query("val1 != val2")
        diffs_check = diffs.merge(
            df1, left_on=["src1", "dst1"], right_on=["dst1", "src1"]
        )
        query = diffs_check.query("val1_y != val2")
        if len(query) > 0:
            print("differences: ")
            print(query)
        assert 0 == len(query)
    #
    # Now check the symmetrized edges are present. If the original
    # data contains (u,v) we want to make sure that (v,u) is present
    # in the new data frame.
    #
    # We can accomplish this by doing the join (merge) where
    # (src1[i] = dst2[i]) and (dst1[i] = src2[i]), and verifying
    # that we get exactly the same number of entries in the data frame.
    #
    join = df1.merge(df2, left_on=["src1", "dst1"], right_on=["dst2", "src2"])
    assert len(df1) == len(join)
    if val1 is not None:
        #
        # Check the values. In this join, if val1 and val2 are
        # the same then we are good. If they are different then
        # we need to check if the value is selected from the opposite
        # direction, so we'll merge with the edges reversed and
        # check to make sure that the values all match
        #
        diffs = join.query("val1 != val2")
        diffs_check = diffs.merge(
            df1, left_on=["src2", "dst2"], right_on=["src1", "dst1"]
        )
        query = diffs_check.query("val1_y != val2")
        if len(query) > 0:
            print("differences: ")
            print(query)
        assert 0 == len(query)
    #
    # Finally, let's check (in both directions) backwards.
    # We want to make sure that no edges were created in
    # the symmetrize logic that didn't already exist in one
    # direction or the other. This is a bit more complicated.
    #
    # The complication here is that the original data could,
    # for some edge (u,v) ALREADY contain the edge (v,u). The
    # symmetrized graph will not duplicate any edges, so the edge
    # (u,v) will only be present once. So we can't simply check
    # counts of df2 joined with df1.
    #
    # join1 will contain the join (merge) of df2 to df1 in the
    # forward direction
    # join2 will contain the join (merge) of df2 to df1 in the
    # reverse direction
    #
    # Finally, we'll do an outer join of join1 and join2, which
    # will combine any (u,v)/(v,u) pairs that might exist into
    # a joined row while keeping any (u,v) pairs that don't exist
    # in both data frames as single rows. This gives us a data frame
    # with the same number of rows as the symmetrized data.
    #
    join1 = df2.merge(df1, left_on=["src2", "dst2"], right_on=["src1", "dst1"])
    join2 = df2.merge(df1, left_on=["src2", "dst2"], right_on=["dst1", "src1"])
    joinM = join1.merge(join2, how="outer", on=["src2", "dst2"])
    assert len(df2) == len(joinM)
    #
    # Note, we don't need to check the reverse values... we checked
    # them in both directions earlier.
    #
@pytest.mark.sg
@pytest.mark.skip("debugging")
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_symmetrize_unweighted(graph_file):
    """Symmetrizing an unweighted edgelist must preserve every edge in
    both directions (validated by compare())."""
    gc.collect()
    edgelist = graph_file.get_edgelist()
    sym_src, sym_dst = cugraph.symmetrize(edgelist["src"], edgelist["dst"])
    # Verify every original edge exists in both directions and nothing
    # extraneous was added.
    compare(
        edgelist["src"],
        edgelist["dst"],
        None,
        sym_src,
        sym_dst,
        None,
    )
@pytest.mark.sg
@pytest.mark.skip("debugging")
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_symmetrize_weighted(graph_file):
    """Symmetrizing a weighted edgelist must preserve edges and weights."""
    gc.collect()
    edgelist = graph_file.get_edgelist()
    src, dst, wgt = edgelist["src"], edgelist["dst"], edgelist["wgt"]
    sym_src, sym_dst, sym_wgt = cugraph.symmetrize(src, dst, wgt)
    compare(src, dst, wgt, sym_src, sym_dst, sym_wgt)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/internals/test_replicate_edgelist_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
import numpy as np
from cugraph.testing import UNDIRECTED_DATASETS, karate_disjoint
from cugraph.structure.replicate_edgelist import replicate_edgelist
from cudf.testing.testing import assert_frame_equal
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: reclaim memory from the previous test."""
    gc.collect()
# Canonical column names used throughout these tests.
edgeWeightCol = "weights"
edgeIdCol = "edge_id"
edgeTypeCol = "edge_type"
srcCol = "src"
dstCol = "dst"
# Test datasets: all undirected datasets plus a disjoint karate graph.
input_data = UNDIRECTED_DATASETS + [karate_disjoint]
datasets = [pytest.param(d) for d in input_data]
# Cartesian product of all test parameters. NOTE: the declaration order
# here defines the order of values in request.param for the input_combo
# fixture below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([True, False], "distributed"),
    ([True, False], "use_weights"),
    ([True, False], "use_edge_ids"),
    ([True, False], "use_edge_type_ids"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    # BUG FIX: the key tuple must match the parameter order declared in
    # gen_fixture_params_product: (graph_file, distributed, use_weights,
    # use_edge_ids, use_edge_type_ids). The previous key order listed
    # "distributed" last, mislabeling all four boolean flags when zipped
    # with request.param.
    return dict(
        zip(
            (
                "graph_file",
                "distributed",
                "use_weights",
                "use_edge_ids",
                "use_edge_type_ids",
            ),
            request.param,
        )
    )
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_mg_replicate_edgelist(dask_client, input_combo):
    """Every partition returned by replicate_edgelist must contain the full
    input edgelist (optionally with weight/id/type columns), whether the
    input was a single cudf frame or already distributed as dask_cudf."""
    df = input_combo["graph_file"].get_edgelist()
    distributed = input_combo["distributed"]
    use_weights = input_combo["use_weights"]
    use_edge_ids = input_combo["use_edge_ids"]
    use_edge_type_ids = input_combo["use_edge_type_ids"]
    columns = [srcCol, dstCol]
    weight = None
    edge_id = None
    edge_type = None
    if use_weights:
        df = df.rename(columns={"wgt": edgeWeightCol})
        columns.append(edgeWeightCol)
        weight = edgeWeightCol
    if use_edge_ids:
        # Use the row index as the edge id, cast to the vertex dtype.
        df = df.reset_index().rename(columns={"index": edgeIdCol})
        df[edgeIdCol] = df[edgeIdCol].astype(df[srcCol].dtype)
        columns.append(edgeIdCol)
        edge_id = edgeIdCol
    if use_edge_type_ids:
        # Random edge types in [0, 10), cast to the vertex dtype.
        df[edgeTypeCol] = np.random.randint(0, 10, size=len(df))
        df[edgeTypeCol] = df[edgeTypeCol].astype(df[srcCol].dtype)
        columns.append(edgeTypeCol)
        edge_type = edgeTypeCol
    if distributed:
        # Distribute the edges across all ranks
        num_workers = len(dask_client.scheduler_info()["workers"])
        df = dask_cudf.from_cudf(df, npartitions=num_workers)
    ddf = replicate_edgelist(
        df[columns], weight=weight, edge_id=edge_id, edge_type=edge_type
    )
    if distributed:
        df = df.compute()
    # Each partition must hold a complete copy of the input edgelist.
    for i in range(ddf.npartitions):
        result_df = (
            ddf.get_partition(i)
            .compute()
            .sort_values([srcCol, dstCol])
            .reset_index(drop=True)
        )
        expected_df = df[columns].sort_values([srcCol, dstCol]).reset_index(drop=True)
        assert_frame_equal(expected_df, result_df, check_dtype=False, check_like=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/internals/test_symmetrize_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import pandas as pd
import dask_cudf
import cugraph
from cugraph.testing import utils
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force garbage collection before each test to free GPU/host memory."""
    gc.collect()
@pytest.mark.mg
def test_version():
    """Smoke test: cugraph exposes a non-empty version string."""
    # Previously this only accessed the attribute; assert on it so an
    # empty/missing version actually fails the test instead of passing.
    assert cugraph.__version__
def compare(ddf1, ddf2, src_col_name, dst_col_name, val_col_name):
    """
    Assert that ddf2 is a valid symmetrization of ddf1.

    Verifies (via dask_cudf merges) that every edge of ddf1 appears in ddf2
    in both directions, and that ddf2 contains no edge that did not exist in
    some direction in ddf1.  src/dst column names may be single names or
    lists (multi-column vertices); val_col_name is an optional weight column.
    """
    #
    # We will do comparison computations by using dataframe
    # merge functions (essentially doing fast joins).
    # Check to see if all pairs in the original data frame
    # still exist in the new data frame. If we join (merge)
    # the data frames where (src1[i]=src2[i]) and (dst1[i]=dst2[i])
    # then we should get exactly the same number of entries in
    # the data frame if we did not lose any data.
    #
    # Suffix the columns so the two frames can be merged unambiguously.
    ddf1 = ddf1.add_suffix("_x")
    ddf2 = ddf2.add_suffix("_y")
    # Normalize single column names into lists so the code below is uniform.
    if not isinstance(src_col_name, list) and not isinstance(dst_col_name, list):
        src_col_name = [src_col_name]
        dst_col_name = [dst_col_name]
    # Column names for ddf1
    src_col_name1 = [f"{src}_x" for src in src_col_name]
    dst_col_name1 = [f"{dst}_x" for dst in dst_col_name]
    col_names1 = src_col_name1 + dst_col_name1
    # Column names for ddf2
    src_col_name2 = [f"{src}_y" for src in src_col_name]
    dst_col_name2 = [f"{dst}_y" for dst in dst_col_name]
    col_names2 = src_col_name2 + dst_col_name2
    if val_col_name is not None:
        val_col_name = [val_col_name]
        val_col_name1 = [f"{val}_x" for val in val_col_name]
        val_col_name2 = [f"{val}_y" for val in val_col_name]
        col_names1 += val_col_name1
        col_names2 += val_col_name2
    #
    # Now check the symmetrized edges are present. If the original
    # data contains (u,v), we want to make sure that (v,u) is present
    # in the new data frame.
    #
    # We can accomplish this by doing the join (merge) where
    # (src1[i] = dst2[i]) and (dst1[i] = src2[i]), and verifying
    # that we get exactly the same number of entries in the data frame.
    #
    join = ddf1.merge(ddf2, left_on=[*col_names1], right_on=[*col_names2])
    if len(ddf1) != len(join):
        # The code below is for debugging purposes only. It will print
        # edges in the original dataframe that are missing from the symmetrize
        # dataframe
        join2 = ddf1.merge(
            ddf2, how="left", left_on=[*col_names1], right_on=[*col_names2]
        )
        # FIXME: Didn't find a cudf alternative for the function below
        pd.set_option("display.max_rows", 500)
        print(
            "join2 = \n",
            join2.sort_values([*col_names1])
            .compute()
            .to_pandas()
            .query(f"{src_col_name[0]}_y.isnull()", engine="python"),
        )
    assert len(ddf1) == len(join)
    #
    # Finally, let's check (in both directions) backwards.
    # We want to make sure that no edges were created in
    # the symmetrize logic that didn't already exist in one
    # direction or the other. This is a bit more complicated.
    #
    # The complication here is that the original data could,
    # for some edge (u,v) ALREADY contain the edge (v,u). The
    # symmetrized graph will not duplicate any edges, so the edge
    # (u,v) will only be present once. So we can't simply check
    # counts of ddf2 joined with ddf1.
    #
    # join1 will contain the join (merge) of ddf2 to ddf1 in the
    # forward direction
    # join2 will contain the join (merge) of ddf2 to ddf1 in the
    # reverse direction
    #
    # Finally, we'll do an outer join of join1 and join2, which
    # will combine any (u,v)/(v,u) pairs that might exist into
    # a joined row while keeping any (u,v) pairs that don't exist
    # in both data frames as single rows. This gives us a data frame
    # with the same number of rows as the symmetrized data.
    #
    swap_columns = dst_col_name1 + src_col_name1
    if val_col_name is not None:
        swap_columns += val_col_name1
    join1 = ddf2.merge(ddf1, left_on=[*col_names2], right_on=[*col_names1])
    join2 = ddf2.merge(ddf1, left_on=[*col_names2], right_on=[*swap_columns])
    # Ensure join2["weight_*"] and join1["weight"] are of the same type.
    # Failing to do that can trigger ddf to return a warning if the two ddf
    # being merged are of different types
    join2 = join2.astype(join1.dtypes.to_dict())
    joinM = join1.merge(join2, how="outer", on=[*ddf2.columns])
    assert len(ddf2) == len(joinM)
    #
    # Note, we don't need to check the reverse values... we checked
    # them in both directions earlier.
    #
# Every undirected dataset plus an asymmetric karate graph that actually
# needs symmetrization.
input_data_path = [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate-asymmetric.csv"
] + utils.DATASETS_UNDIRECTED
datasets = [pytest.param(d.as_posix()) for d in input_data_path]

# Cartesian product of all parameter combinations.  The tuple element order
# matches the declaration order below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([True, False], "edgevals"),
    ([True, False], "multi_columns"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Expose the current parameter combination as a dictionary for use in
    tests or other parameterized fixtures.
    """
    graph_file, edgevals, multi_columns = request.param
    return {
        "graph_file": graph_file,
        "edgevals": edgevals,
        "multi_columns": multi_columns,
    }
@pytest.fixture(scope="module")
def read_datasets(input_combo):
    """
    This fixture reads the datasets and returns a dictionary containing all
    input params required to run the symmetrize function.

    NOTE: it mutates (and returns) the module-scoped ``input_combo`` dict,
    caching the dask_cudf edgelist and the resolved column names in it.
    """
    graph_file = input_combo["graph_file"]
    edgevals = input_combo["edgevals"]
    multi_columns = input_combo["multi_columns"]

    ddf = utils.read_dask_cudf_csv_file(graph_file)
    src_col_name = "src"
    dst_col_name = "dst"
    val_col_name = None

    if edgevals:
        val_col_name = "weight"

    if multi_columns:
        # Generate multicolumn from the ddf: a second vertex column is
        # derived from the first by a fixed +100 offset.
        ddf = ddf.rename(columns={"src": "src_0", "dst": "dst_0"})
        ddf["src_1"] = ddf["src_0"] + 100
        ddf["dst_1"] = ddf["dst_0"] + 100

        src_col_name = ["src_0", "src_1"]
        dst_col_name = ["dst_0", "dst_1"]

    input_combo["ddf"] = ddf
    input_combo["src_col_name"] = src_col_name
    input_combo["dst_col_name"] = dst_col_name
    input_combo["val_col_name"] = val_col_name

    return input_combo
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_mg_symmetrize(dask_client, read_datasets):
    """
    Symmetrize a dask_cudf edgelist via cugraph.symmetrize (the
    Series-returning API) and validate the result with compare().
    """
    ddf = read_datasets["ddf"]
    src_col_name = read_datasets["src_col_name"]
    dst_col_name = read_datasets["dst_col_name"]
    val_col_name = read_datasets["val_col_name"]

    if val_col_name is not None:
        sym_src, sym_dst, sym_val = cugraph.symmetrize(
            ddf, src_col_name, dst_col_name, val_col_name
        )
    else:
        # No weights: restrict the input to just the vertex columns.
        if not isinstance(src_col_name, list):
            vertex_col_names = [src_col_name, dst_col_name]
        else:
            vertex_col_names = src_col_name + dst_col_name
        ddf = ddf[[*vertex_col_names]]
        sym_src, sym_dst = cugraph.symmetrize(ddf, src_col_name, dst_col_name)

    # create a dask DataFrame from the dask Series
    if isinstance(sym_src, dask_cudf.Series):
        ddf2 = sym_src.to_frame()
        ddf2 = ddf2.rename(columns={sym_src.name: "src"})
        ddf2["dst"] = sym_dst
    else:
        # Multi-column case: sym_src/sym_dst are already DataFrames.
        ddf2 = dask_cudf.concat([sym_src, sym_dst], axis=1)

    if val_col_name is not None:
        ddf2["weight"] = sym_val

    compare(ddf, ddf2, src_col_name, dst_col_name, val_col_name)
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_mg_symmetrize_df(dask_client, read_datasets):
    """
    Symmetrize a dask_cudf edgelist via cugraph.symmetrize_ddf (the
    DataFrame-returning API) and validate the result with compare().
    """
    ddf = read_datasets["ddf"]
    src_col_name = read_datasets["src_col_name"]
    dst_col_name = read_datasets["dst_col_name"]
    val_col_name = read_datasets["val_col_name"]
    sym_ddf = cugraph.symmetrize_ddf(ddf, src_col_name, dst_col_name, val_col_name)

    compare(ddf, sym_ddf, src_col_name, dst_col_name, val_col_name)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/internals/test_renumber.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file test the Renumbering features
import gc
import pytest
import pandas as pd
import cudf
from cudf.testing import assert_series_equal
from cugraph.structure.number_map import NumberMap
from cugraph.testing import utils, DEFAULT_DATASETS
@pytest.mark.sg
def test_renumber_ips_cols():
    """
    Renumber integer columns derived from IP strings and verify that mapping
    the internal ids back yields the original external ids.
    """
    source_list = [
        "192.168.1.1",
        "172.217.5.238",
        "216.228.121.209",
        "192.16.31.23",
    ]
    dest_list = [
        "172.217.5.238",
        "216.228.121.209",
        "192.16.31.23",
        "192.168.1.1",
    ]

    pdf = pd.DataFrame({"source_list": source_list, "dest_list": dest_list})

    gdf = cudf.from_pandas(pdf)

    # Convert the IP strings to integer vertex ids.
    gdf["source_as_int"] = gdf["source_list"].str.ip2int()
    gdf["dest_as_int"] = gdf["dest_list"].str.ip2int()

    # Brackets are added to the column names to trigger the python renumbering
    renumbered_gdf, renumber_map = NumberMap.renumber(
        gdf, ["source_as_int"], ["dest_as_int"], preserve_order=True
    )

    input_check = renumbered_gdf.merge(gdf, on=["source_list", "dest_list"])

    # Map the internal src/dst ids back to external ids for comparison.
    output_check = renumber_map.from_internal_vertex_id(
        renumbered_gdf,
        renumber_map.renumbered_src_col_name,
        external_column_names=["check_src"],
    )
    output_check = renumber_map.from_internal_vertex_id(
        output_check,
        renumber_map.renumbered_dst_col_name,
        external_column_names=["check_dst"],
    )

    merged = output_check.merge(input_check, on=["source_list", "dest_list"])

    assert_series_equal(merged["check_src"], merged["source_as_int"], check_names=False)
    assert_series_equal(merged["check_dst"], merged["dest_as_int"], check_names=False)
@pytest.mark.sg
def test_renumber_negative_col():
    """
    Renumber columns containing negative vertex ids and verify they round-trip
    back to the original external ids.
    """
    source_list = [4, 6, 8, -20, 1]
    dest_list = [1, 29, 35, 0, 77]

    df = pd.DataFrame({"source_list": source_list, "dest_list": dest_list})

    gdf = cudf.DataFrame.from_pandas(df[["source_list", "dest_list"]])

    # Keep pristine copies of the inputs to validate against.
    gdf["original_src"] = gdf["source_list"]
    gdf["original_dst"] = gdf["dest_list"]

    # Brackets are added to the column names to trigger the python renumbering
    renumbered_gdf, renumber_map = NumberMap.renumber(
        gdf, ["source_list"], ["dest_list"], preserve_order=True
    )

    input_check = renumbered_gdf.merge(gdf, on=["original_src", "original_dst"])

    output_check = renumber_map.from_internal_vertex_id(
        renumbered_gdf,
        renumber_map.renumbered_src_col_name,
        external_column_names=["check_src"],
    )
    output_check = renumber_map.from_internal_vertex_id(
        output_check,
        renumber_map.renumbered_dst_col_name,
        external_column_names=["check_dst"],
    )

    merged = output_check.merge(input_check, on=["original_src", "original_dst"])

    assert_series_equal(merged["check_src"], merged["original_src"], check_names=False)
    assert_series_equal(merged["check_dst"], merged["original_dst"], check_names=False)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_renumber_files_col(graph_file):
    """
    Renumber a single translated vertex column per side and verify that
    unrenumbering restores the original (translated) ids in order.
    """
    gc.collect()
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    sources = cudf.Series(M["0"])
    destinations = cudf.Series(M["1"])

    # Shift the ids so external ids differ from the dense internal range.
    translate = 1000

    gdf = cudf.DataFrame()
    gdf["src"] = cudf.Series([x + translate for x in sources.values_host])
    gdf["dst"] = cudf.Series([x + translate for x in destinations.values_host])

    exp_src = cudf.Series([x + translate for x in sources.values_host])
    exp_dst = cudf.Series([x + translate for x in destinations.values_host])

    # Brackets are added to the column names to trigger the python renumbering
    renumbered_df, renumber_map = NumberMap.renumber(
        gdf, ["src"], ["dst"], preserve_order=True
    )

    unrenumbered_df = renumber_map.unrenumber(
        renumbered_df, renumber_map.renumbered_src_col_name, preserve_order=True
    )
    unrenumbered_df = renumber_map.unrenumber(
        unrenumbered_df, renumber_map.renumbered_dst_col_name, preserve_order=True
    )

    assert_series_equal(
        exp_src,
        unrenumbered_df[renumber_map.renumbered_src_col_name],
        check_names=False,
    )
    assert_series_equal(
        exp_dst,
        unrenumbered_df[renumber_map.renumbered_dst_col_name],
        check_names=False,
    )
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_renumber_files_multi_col(graph_file):
    """
    Renumber multi-column vertices and verify that unrenumbering restores
    every external column (named ``0_<col>``, ``1_<col>``) in order.
    """
    gc.collect()
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    sources = cudf.Series(M["0"])
    destinations = cudf.Series(M["1"])

    translate = 1000

    gdf = cudf.DataFrame()
    gdf["src_old"] = sources
    gdf["dst_old"] = destinations
    gdf["src"] = sources + translate
    gdf["dst"] = destinations + translate

    # Brackets are added to the column names to trigger the python renumbering
    renumbered_df, renumber_map = NumberMap.renumber(
        gdf, ["src", "src_old"], ["dst", "dst_old"], preserve_order=True
    )

    unrenumbered_df = renumber_map.unrenumber(
        renumbered_df, renumber_map.renumbered_src_col_name, preserve_order=True
    )
    unrenumbered_df = renumber_map.unrenumber(
        unrenumbered_df, renumber_map.renumbered_dst_col_name, preserve_order=True
    )

    src = renumber_map.renumbered_src_col_name
    dst = renumber_map.renumbered_dst_col_name
    assert_series_equal(gdf["src"], unrenumbered_df[f"0_{src}"], check_names=False)
    assert_series_equal(gdf["src_old"], unrenumbered_df[f"1_{src}"], check_names=False)
    assert_series_equal(gdf["dst"], unrenumbered_df[f"0_{dst}"], check_names=False)
    assert_series_equal(gdf["dst_old"], unrenumbered_df[f"1_{dst}"], check_names=False)
@pytest.mark.sg
def test_renumber_common_col_names():
    """
    Ensure that commonly-used column names in the input do not conflict with
    names used internally by NumberMap.
    """
    # test multi-column ("legacy" renumbering code path)
    gdf = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [1, 2, 3],
            "weights": [0.1, 0.2, 0.3],
            "col_a": [8, 1, 82],
            "col_b": [1, 82, 3],
            "col_c": [9, 7, 2],
            "col_d": [1, 2, 3],
        }
    )

    renumbered_df, renumber_map = NumberMap.renumber(
        gdf, ["col_a", "col_b"], ["col_c", "col_d"]
    )

    # Internal column names must not collide with the user's "src"/"dst".
    assert renumber_map.renumbered_src_col_name != "src"
    assert renumber_map.renumbered_dst_col_name != "dst"
    assert renumber_map.renumbered_src_col_name in renumbered_df.columns
    assert renumber_map.renumbered_dst_col_name in renumbered_df.columns

    # test experimental renumbering code path
    gdf = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [1, 2, 3],
            "weights": [0.1, 0.2, 0.3],
            "col_a": [0, 1, 2],
            "col_b": [1, 2, 3],
        }
    )

    renumbered_df, renumber_map = NumberMap.renumber(gdf, "col_a", "col_b")

    assert renumber_map.renumbered_src_col_name != "src"
    assert renumber_map.renumbered_dst_col_name != "dst"
    assert renumber_map.renumbered_src_col_name in renumbered_df.columns
    assert renumber_map.renumbered_dst_col_name in renumbered_df.columns
@pytest.mark.sg
def test_renumber_unrenumber_non_default_vert_names():
    """
    Test that renumbering a dataframe with generated src/dst column names can
    be used for unrenumbering results.
    """
    input_gdf = cudf.DataFrame(
        {
            "dst": [1, 2, 3],
            "weights": [0.1, 0.2, 0.3],
            "col_a": [99, 199, 2],
            "col_b": [199, 2, 32],
        }
    )
    # Brackets are added to the column names to trigger the python renumbering
    renumbered_df, number_map = NumberMap.renumber(input_gdf, ["col_a"], ["col_b"])

    # Simulate an algorithm result keyed on internal vertex ids (0..3), then
    # unrenumber it back to the external ids.
    some_result_gdf = cudf.DataFrame({"vertex": [0, 1, 2, 3]})
    expected_values = [99, 199, 2, 32]
    some_result_gdf = number_map.unrenumber(some_result_gdf, "vertex")

    assert sorted(expected_values) == sorted(
        some_result_gdf["vertex"].to_arrow().to_pylist()
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/comms/test_comms_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cugraph.dask as dcg
import cudf
import dask_cudf
import cugraph
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force garbage collection before each test to free GPU/host memory."""
    gc.collect()


# Run each test with both directed and undirected graphs.
IS_DIRECTED = [True, False]
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
def _run_mg_pagerank(input_data_path, directed):
    """Read a CSV as dask_cudf, build an MG graph, return computed pagerank."""
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst")
    return dcg.pagerank(dg).compute()


def _run_sg_pagerank(input_data_path, directed):
    """Read a CSV as cudf, build a single-GPU graph, return pagerank."""
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst")
    return cugraph.pagerank(g)


def _count_mismatches(expected_pr, result_pr, tol=1.0e-05):
    """
    Return the number of vertices whose SG ("local") and MG ("dask") pagerank
    values differ by more than tol * 1.1.  Asserts both results have the same
    number of vertices.
    """
    assert len(expected_pr) == len(result_pr)
    compare_pr = expected_pr.merge(
        result_pr, on="vertex", suffixes=["_local", "_dask"]
    )
    err = 0
    for i in range(len(compare_pr)):
        diff = abs(
            compare_pr["pagerank_local"].iloc[i] - compare_pr["pagerank_dask"].iloc[i]
        )
        if diff > tol * 1.1:
            err = err + 1
    return err


@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_pagerank(dask_client, directed):
    """
    Initialize and run pagerank on two distributed graphs with the same
    communicator, then verify both match the single-GPU results.
    """
    input_data_path1 = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    print(f"dataset1={input_data_path1}")
    input_data_path2 = (RAPIDS_DATASET_ROOT_DIR_PATH / "dolphins.csv").as_posix()
    print(f"dataset2={input_data_path2}")

    # Both MG graphs are created before comparison so they share the
    # communicator, matching the original test's intent.
    result_pr1 = _run_mg_pagerank(input_data_path1, directed)
    result_pr2 = _run_mg_pagerank(input_data_path2, directed)

    # Calculate single GPU pagerank for verification of results
    expected_pr1 = _run_sg_pagerank(input_data_path1, directed)
    expected_pr2 = _run_sg_pagerank(input_data_path2, directed)

    # Compare and verify pagerank results
    err1 = _count_mismatches(expected_pr1, result_pr1)
    print("Mismatches in ", input_data_path1, ": ", err1)
    err2 = _count_mismatches(expected_pr2, result_pr2)
    print("Mismatches in ", input_data_path2, ": ", err2)
    assert err1 == err2 == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/nx/test_nx_convert.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import pandas as pd
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
def _compare_graphs(nxG, cuG, has_wt=True):
    """
    Assert that a NetworkX graph and a cuGraph graph have identical node and
    edge sets.  Edges are canonicalized so (u,v) with u>v becomes (v,u),
    then both edgelists are sorted and compared as dicts.
    """
    assert nxG.number_of_nodes() == cuG.number_of_nodes()
    assert nxG.number_of_edges() == cuG.number_of_edges()

    cu_df = cuG.view_edge_list().to_pandas()
    cu_df = cu_df.rename(columns={"0": "src", "1": "dst"})
    if has_wt is True:
        cu_df = cu_df.drop(columns=["weight"])

    # Canonicalize edge direction: flip any (src > dst) edge.
    # NOTE(review): when out_of_order is non-empty, rows with src == dst
    # (self-loops) fall in neither filter and are dropped; when it is empty
    # the frame is left untouched.  The nx side gets the same treatment, so
    # the comparison stays consistent — but confirm if self-loop datasets
    # are ever added.
    out_of_order = cu_df[cu_df["src"] > cu_df["dst"]]
    if len(out_of_order) > 0:
        out_of_order = out_of_order.rename(columns={"src": "dst", "dst": "src"})
        right_order = cu_df[cu_df["src"] < cu_df["dst"]]

        cu_df = pd.concat([out_of_order, right_order])

        del out_of_order
        del right_order
    cu_df = cu_df.sort_values(by=["src", "dst"]).reset_index(drop=True)

    nx_df = nx.to_pandas_edgelist(nxG)
    if has_wt is True:
        nx_df = nx_df.drop(columns=["weight"])

    nx_df = nx_df.rename(columns={"source": "src", "target": "dst"})
    nx_df = nx_df.astype("int32")

    # Same canonicalization as above for the NetworkX edgelist.
    out_of_order = nx_df[nx_df["src"] > nx_df["dst"]]
    if len(out_of_order) > 0:
        out_of_order = out_of_order.rename(columns={"src": "dst", "dst": "src"})
        right_order = nx_df[nx_df["src"] < nx_df["dst"]]

        nx_df = pd.concat([out_of_order, right_order])
        del out_of_order
        del right_order
    nx_df = nx_df.sort_values(by=["src", "dst"]).reset_index(drop=True)

    assert cu_df.to_dict() == nx_df.to_dict()
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_networkx_compatibility(graph_file):
    # test to make sure cuGraph and Nx build similar Graphs
    # Read in the graph
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path, read_weights_in_sp=True)

    # create a NetworkX DiGraph
    nxG = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )

    # create a cuGraph Directed Graph from the same edgelist
    gdf = cudf.from_pandas(M)
    cuG = cugraph.from_cudf_edgelist(
        gdf,
        source="0",
        destination="1",
        edge_attr="weight",
        create_using=cugraph.Graph(directed=True),
    )

    # both graphs must have identical node and edge sets
    _compare_graphs(nxG, cuG)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_nx_convert_undirected(graph_file):
    """convert_from_nx on an undirected, unweighted Nx graph preserves
    directedness, weightedness, and topology."""
    # read data and create a Nx Graph
    dataset_path = graph_file.get_path()
    nx_df = utils.read_csv_for_nx(dataset_path)
    nxG = nx.from_pandas_edgelist(nx_df, "0", "1", create_using=nx.Graph)
    assert nx.is_directed(nxG) is False
    assert nx.is_weighted(nxG) is False

    cuG = cugraph.utilities.convert_from_nx(nxG)
    assert cuG.is_directed() is False
    assert cuG.is_weighted() is False

    _compare_graphs(nxG, cuG, has_wt=False)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_nx_convert_directed(graph_file):
    """convert_from_nx on a directed, unweighted Nx DiGraph preserves
    directedness and topology."""
    # read data and create a Nx DiGraph
    dataset_path = graph_file.get_path()
    nx_df = utils.read_csv_for_nx(dataset_path)
    nxG = nx.from_pandas_edgelist(nx_df, "0", "1", create_using=nx.DiGraph)
    assert nxG.is_directed() is True

    cuG = cugraph.utilities.convert_from_nx(nxG)
    assert cuG.is_directed() is True
    assert cuG.is_weighted() is False

    _compare_graphs(nxG, cuG, has_wt=False)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_nx_convert_weighted(graph_file):
    """convert_from_nx with weight= on a weighted Nx DiGraph preserves
    directedness, weights, and topology."""
    # read data and create a Nx DiGraph
    dataset_path = graph_file.get_path()
    nx_df = utils.read_csv_for_nx(dataset_path, read_weights_in_sp=True)
    nxG = nx.from_pandas_edgelist(nx_df, "0", "1", "weight", create_using=nx.DiGraph)
    assert nx.is_directed(nxG) is True
    assert nx.is_weighted(nxG) is True

    cuG = cugraph.utilities.convert_from_nx(nxG, weight="weight")
    assert cugraph.is_directed(cuG) is True
    assert cugraph.is_weighted(cuG) is True

    _compare_graphs(nxG, cuG, has_wt=True)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_nx_convert_multicol(graph_file):
    """Convert an Nx graph and verify node/edge counts are preserved.

    FIXME: the original version also built a DiGraph ``G`` with per-edge
    list attributes (presumably to exercise a multi-column conversion),
    but ``G`` was never used — that dead code has been removed.  A true
    multi-column conversion test still needs to be written.
    """
    # read data and create a Nx Graph
    dataset_path = graph_file.get_path()
    nx_df = utils.read_csv_for_nx(dataset_path)

    nxG = nx.from_pandas_edgelist(nx_df, "0", "1")

    cuG = cugraph.utilities.convert_from_nx(nxG)

    assert nxG.number_of_nodes() == cuG.number_of_nodes()
    assert nxG.number_of_edges() == cuG.number_of_edges()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/nx/test_compat_algo.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cugraph.experimental.compat.nx as nx
@pytest.mark.sg
def test_connectivity():
    """Run a native nx algorithm that has not been overridden by the compat
    layer (connected_components) and check the component sets."""
    expected = [{1, 2, 3, 4, 5}, {8, 9, 7}]
    graph = nx.Graph()
    # A path component and a triangle component.
    graph.add_edges_from(
        [(1, 2), (2, 3), (3, 4), (4, 5), (7, 8), (8, 9), (7, 9)]
    )
    assert list(nx.connected_components(graph)) == expected
@pytest.mark.sg
def test_pagerank_result_type():
    """pagerank through the compat layer must return a plain dict."""
    G = nx.DiGraph()
    # add_nodes_from instead of a side-effect list comprehension
    G.add_nodes_from(["A", "B", "C", "D", "E", "F", "G"])
    G.add_edges_from(
        [
            ("G", "A"),
            ("A", "G"),
            ("B", "A"),
            ("C", "A"),
            ("A", "C"),
            ("A", "D"),
            ("E", "A"),
            ("F", "A"),
            ("D", "B"),
            ("D", "F"),
        ]
    )
    ppr1 = nx.pagerank(G)
    # This just tests that the right type is returned.
    assert isinstance(ppr1, dict)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/nx/test_compat_pr.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Temporarily suppress warnings till networkX fixes deprecation warnings
# (Using or importing the ABCs from 'collections' instead of from
# 'collections.abc' is deprecated, and in 3.8 it will stop working) for
# python 3.7. Also, this import networkx needs to be relocated in the
# third-party group once this gets fixed.
import gc
import importlib
import pytest
import numpy as np
from cugraph.testing import utils
from cugraph.datasets import karate
from pylibcugraph.testing.utils import gen_fixture_params_product
# Parameter value lists fed into the fixture product below.
MAX_ITERATIONS = [100, 200]
TOLERANCE = [1.0e-06]
ALPHA = [0.85, 0.70]
PERS_PERCENT = [0, 15]
HAS_GUESS = [0, 1]
FILES_UNDIRECTED = [karate.get_path()]

# these are only used in the missing parameter tests.
# Expected first-14 vertices of the karate graph, sorted ascending by
# pagerank score, for each invocation variant.
KARATE_RANKING = [11, 9, 14, 15, 18, 20, 22, 17, 21, 12, 26, 16, 28, 19]
KARATE_PERS_RANKING = [11, 16, 17, 21, 4, 10, 5, 6, 12, 7, 9, 24, 19, 25]
KARATE_ITER_RANKINGS = [11, 9, 14, 15, 18, 20, 22, 17, 21, 12, 26, 16, 28, 19]
KARATE_NSTART_RANKINGS = [11, 9, 14, 15, 18, 20, 22, 17, 21, 12, 26, 16, 28, 19]
# =============================================================================
# Pytest fixtures
# =============================================================================
def setup_function():
    """Force garbage collection before each test to free GPU/host memory."""
    gc.collect()


datasets = FILES_UNDIRECTED

# Cartesian product of all parameter combinations.  The tuple element order
# matches the declaration order below and must agree with the key order used
# by the input_combo fixture.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (MAX_ITERATIONS, "max_iter"),
    (TOLERANCE, "tol"),
    (PERS_PERCENT, "pers_percent"),
    (HAS_GUESS, "has_guess"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Expose the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    names = ("graph_file", "max_iter", "tol", "pers_percent", "has_guess")
    return {name: value for name, value in zip(names, request.param)}
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the expected results from the pagerank algorithm,
    computed with plain networkx.  It also caches the personalization and
    nstart inputs back into the (module-scoped) input_combo dict.
    """
    import networkx

    M = utils.read_csv_for_nx(input_combo["graph_file"])
    Gnx = networkx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=networkx.DiGraph()
    )

    # Vertices that actually appear in the edgelist.
    nnz_vtx = np.unique(M[["0", "1"]])

    personalization = get_personalization(input_combo["pers_percent"], nnz_vtx)

    input_combo["nstart"] = None
    nstart = None
    if input_combo["has_guess"] == 1:
        # Uniform initial guess over all vertices.
        z = {k: 1.0 / Gnx.number_of_nodes() for k in Gnx.nodes()}
        input_combo["nstart"] = z
        nstart = z

    pr = networkx.pagerank(
        Gnx,
        max_iter=input_combo["max_iter"],
        tol=input_combo["tol"],
        personalization=personalization,
        nstart=nstart,
    )
    input_combo["personalization"] = personalization
    input_combo["nx_pr_rankings"] = pr

    return input_combo
@pytest.fixture(scope="module", params=["networkx", "nxcompat"])
def which_import(request):
    """Return either plain networkx or the cugraph nx-compat module, so every
    test using this fixture runs against both implementations."""
    module_names = {
        "networkx": "networkx",
        "nxcompat": "cugraph.experimental.compat.nx",
    }
    return importlib.import_module(module_names[request.param])
# The function selects personalization_perc% of accessible vertices in graph M
# and randomly assigns them personalization values
def get_personalization(personalization_perc, nnz_vtx):
personalization = None
if personalization_perc != 0:
personalization = {}
personalization_count = int((nnz_vtx.size * personalization_perc) / 100.0)
nnz_vtx = np.random.choice(
nnz_vtx, min(nnz_vtx.size, personalization_count), replace=False
)
nnz_val = np.random.random(nnz_vtx.size)
nnz_val = nnz_val / sum(nnz_val)
for vtx, val in zip(nnz_vtx, nnz_val):
personalization[vtx] = val
return personalization
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", FILES_UNDIRECTED)
def test_with_noparams(graph_file, which_import):
    """pagerank with all defaults yields the expected karate ranking through
    both networkx and the compat layer."""
    nx = which_import
    M = utils.read_csv_for_nx(graph_file)
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )
    pr = nx.pagerank(Gnx)
    # Rounding issues show up in runs but this tests that the
    # cugraph and networkx algorithms are being correctly called.
    assert (sorted(pr, key=pr.get)[:14]) == KARATE_RANKING
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", FILES_UNDIRECTED)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
def test_with_max_iter(graph_file, max_iter, which_import):
    """Run pagerank with an explicit iteration cap and check the ranking."""
    nx = which_import
    edge_df = utils.read_csv_for_nx(graph_file)
    graph = nx.from_pandas_edgelist(
        edge_df,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.DiGraph(),
    )
    scores = nx.pagerank(graph, max_iter=max_iter)
    # Exact scores vary with rounding, but the rank ordering of the lowest
    # fourteen vertices is stable and shows that the cugraph and networkx
    # algorithms are being correctly called.
    lowest_ranked = sorted(scores, key=scores.get)[:14]
    assert lowest_ranked == KARATE_ITER_RANKINGS
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", FILES_UNDIRECTED)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
def test_perc_spec(graph_file, max_iter, which_import):
    """Run pagerank with a fixed personalization dict and check the ranking.

    The same personalization values are used for each imported package
    (networkx and the cugraph nx-compat layer) so results are comparable.
    """
    nx = which_import
    # simple personalization to validate running
    personalization = {
        20: 0.7237260913723357,
        12: 0.03952608674390543,
        22: 0.2367478218837589,
    }
    # Build the graph once; the previous version read the CSV and
    # constructed the identical graph twice back-to-back.
    M = utils.read_csv_for_nx(graph_file)
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )
    # uses the same personalization for each imported package
    pr = nx.pagerank(Gnx, max_iter=max_iter, personalization=personalization)
    # Rounding issues show up in runs but this tests that the
    # cugraph and networkx algorithms are being correctly called.
    assert (sorted(pr, key=pr.get)[:14]) == KARATE_PERS_RANKING
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", FILES_UNDIRECTED)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
def test_with_nstart(graph_file, max_iter, which_import):
    """Run pagerank with a uniform initial guess (nstart) and check ranking."""
    nx = which_import
    M = utils.read_csv_for_nx(graph_file)
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )
    # Uniform starting value for every vertex.  The previous version
    # rebuilt the identical graph a second time after computing this;
    # building it once is sufficient.
    z = {k: 1.0 / Gnx.number_of_nodes() for k in Gnx.nodes()}
    pr = nx.pagerank(Gnx, max_iter=max_iter, nstart=z)
    # Rounding issues show up in runs but this tests that the
    # cugraph and networkx algorithms are being correctly called.
    assert (sorted(pr, key=pr.get)[:14]) == KARATE_NSTART_RANKINGS
@pytest.mark.sg
def test_fixture_data(input_expected_output, which_import):
    """Recompute pagerank and compare against the fixture's expected scores."""
    nx = which_import
    combo = input_expected_output
    edge_df = utils.read_csv_for_nx(combo["graph_file"])
    graph = nx.from_pandas_edgelist(
        edge_df,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.DiGraph(),
    )
    scores = nx.pagerank(
        graph,
        max_iter=combo["max_iter"],
        tol=combo["tol"],
        personalization=combo["personalization"],
        nstart=combo["nstart"],
    )
    actual = sorted(scores.items())
    expected = sorted(combo["nx_pr_rankings"].items())
    # Compare (vertex, score) pairs with a small absolute tolerance.
    for got, want in zip(actual, expected):
        assert got == pytest.approx(want, abs=1.0e-04)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_random_walks_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing import SMALL_DATASETS
from cugraph.datasets import karate_asymmetric
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup hook: force a full garbage-collection pass.

    Collecting up front keeps leftover objects from one test from
    lingering into the next.
    """
    gc.collect()
# Both graph directedness options are exercised by the fixtures below.
IS_DIRECTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
# Datasets under test: the small reference datasets plus an asymmetric graph.
datasets = SMALL_DATASETS + [karate_asymmetric]
# One fixture parameter per (dataset, directedness) combination.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
)
def calc_random_walks(G):
    """
    Run uniform random walks from randomly sampled start vertices.

    Parameters
    ----------
    G : cuGraph.Graph
        Distributed (dask) cuGraph graph; can be directed or undirected.
        Weights in the graph are ignored by the uniform walk.

    Returns
    -------
    path_data : tuple
        (vertex_paths, edge_weight_paths, max_path_length) as returned by
        cugraph.dask.random_walks, where vertex_paths holds the vertices
        of the walks, edge_weight_paths the weights of the traversed
        edges, and max_path_length the longest path length produced.
    start_vertices
        The randomly sampled walk roots (between 1 and 4 vertices).
    max_depth : int
        The randomly chosen maximum walk depth (between 2 and 4).
    """
    # Randomize the number of roots and the walk depth on every call so
    # repeated test runs cover different shapes.
    k = random.randint(1, 4)
    random_walks_type = "uniform"
    max_depth = random.randint(2, 4)
    start_vertices = G.nodes().compute().sample(k).reset_index(drop=True)
    vertex_paths, edge_weights, max_path_length = dcg.random_walks(
        G, random_walks_type, start_vertices, max_depth
    )
    return (vertex_paths, edge_weights, max_path_length), start_vertices, max_depth
def check_random_walks(G, path_data, seeds, max_depth, df_G=None):
    """
    Validate MG random-walk results against the input edge list.

    Verifies that each path starts at one of ``seeds``, that every
    traversed (src, dst) pair exists in ``df_G`` with the expected unit
    weight, that padded (-1) positions carry zero weight, and that the
    reported max path length equals ``max_depth``.  Asserts on any
    violation.
    """
    invalid_edge = 0
    invalid_seeds = 0
    next_path_idx = 0
    # Fix: this counter was previously initialized twice; once suffices.
    invalid_edge_wgt_path = 0
    e_wgt_path_idx = 0
    v_paths = path_data[0].compute()
    e_paths = path_data[1].compute()
    max_path_length = path_data[2]
    sizes = max_path_length

    for _ in range(len(seeds)):
        for i in range(next_path_idx, next_path_idx + sizes):
            src, dst = v_paths.iloc[i], v_paths.iloc[i + 1]
            if i == next_path_idx and src not in seeds.values:
                invalid_seeds += 1
                print("[ERR] Invalid seed: " " src {} != src {}".format(src, seeds))
            else:
                # If everything is good proceed to the next part
                # now check the destination
                # find the src out_degree to ensure it effectively has no outgoing edges
                # No need to check for -1 values, move to the next iteration
                if src != -1:
                    src_degree = G.out_degree([src])["degree"].compute()[0]
                    if dst == -1 and src_degree == 0:
                        # Dead-end vertex: the padded edge weight must be 0.
                        if e_paths.values[e_wgt_path_idx] != 0:
                            wgt = e_paths.values[e_wgt_path_idx]
                            print(
                                "[ERR] Invalid edge weight path: "
                                "Edge src {} dst {} has wgt 0 "
                                "But got wgt {}".format(src, dst, wgt)
                            )
                            invalid_edge_wgt_path += 1
                    else:
                        exp_edge = df_G.loc[
                            (df_G["src"] == (src)) & (df_G["dst"] == (dst))
                        ].reset_index(drop=True)
                        if len(exp_edge) == 0:
                            print(
                                "[ERR] Invalid edge: "
                                "There is no edge src {} dst {}".format(src, dst)
                            )
                            invalid_edge += 1
                        else:
                            # This is a valid edge, check the edge_wgt_path
                            if e_paths.values[e_wgt_path_idx] != 1:
                                wgt = e_paths.values[e_wgt_path_idx]
                                print(
                                    "[ERR] Invalid edge weight path: "
                                    "Edge src {} dst {} has wgt 1 "
                                    "But got wgt {}".format(src, dst, wgt)
                                )
                                invalid_edge_wgt_path += 1
                else:
                    # v_path: src == -1, dst == -1 => e_wgt_path=0 otherwise ERROR
                    if e_paths.values[e_wgt_path_idx] != 0:
                        wgt = e_paths.values[e_wgt_path_idx]
                        print(
                            "[ERR] Invalid edge weight path: "
                            "Edge src {} dst {} has wgt 0 "
                            "But got wgt {}".format(src, dst, wgt)
                        )
                        invalid_edge_wgt_path += 1
                e_wgt_path_idx += 1
        next_path_idx += sizes + 1

    assert invalid_edge == 0
    assert invalid_seeds == 0
    assert invalid_edge_wgt_path == 0
    assert max_path_length == max_depth
@pytest.fixture(scope="module", params=fixture_params)
def input_graph(request):
    """
    Build and return the MG graph for the current (dataset, directedness)
    fixture combination.
    """
    # zip() safely ignores any extra entries in request.param beyond the
    # two names we care about.
    combo = dict(zip(("graph_file", "directed"), request.param))
    csv_path = combo["graph_file"].get_path()
    is_directed = combo["directed"]
    edge_ddf = dask_cudf.read_csv(
        csv_path,
        chunksize=dcg.get_chunksize(csv_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    graph = cugraph.Graph(directed=is_directed)
    graph.from_dask_cudf_edgelist(
        edge_ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )
    return graph
@pytest.mark.mg
@pytest.mark.cugraph_ops
def test_dask_mg_random_walks(dask_client, benchmark, input_graph):
    """Run MG random walks and validate the paths against the input edges."""
    # NOTE(review): the `benchmark` fixture is requested but never used to
    # wrap the call — confirm whether timing was intended here.
    path_data, seeds, max_depth = calc_random_walks(input_graph)
    edge_df = input_graph.input_df.compute().reset_index(drop=True)
    check_random_walks(input_graph, path_data, seeds, max_depth, edge_df)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_mg.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import re
import pytest
import cudf
import cupy
import cugraph
import dask_cudf
from cugraph.datasets import karate, email_Eu_core
from cugraph.experimental import BulkSampler
from cugraph.utilities.utils import create_directory_with_overwrite
@pytest.mark.mg
def test_bulk_sampler_simple(dask_client, scratch_dir):
    """Sample two small batches from karate and verify every submitted
    batch id appears in the parquet output."""
    edges = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    edges["eid"] = edges["eid"].astype("int32")
    edges["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(edges, npartitions=2),
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_simple")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=2,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
    )

    seed_df = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 10, 15], dtype="int32"),
            "batch": cudf.Series([0, 0, 1, 1], dtype="int32"),
        }
    )
    batches = dask_cudf.from_cudf(seed_df, npartitions=2)

    sampler.add_batches(batches, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    recovered_samples = cudf.read_parquet(samples_path)
    # The output must not contain a "map" column in this configuration.
    assert "map" not in recovered_samples.columns

    # Every submitted batch id must show up in the recovered samples.
    for b in batches["batch"].unique().compute().values_host.tolist():
        assert b in recovered_samples["batch_id"].values_host.tolist()

    shutil.rmtree(samples_path)
@pytest.mark.mg
def test_bulk_sampler_mg_graph_sg_input(dask_client, scratch_dir):
    """An MG graph must also accept a plain single-GPU cudf batch frame."""
    edges = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    edges["eid"] = edges["eid"].astype("int32")
    edges["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(edges, npartitions=2),
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_mg_graph_sg_input")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=2,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
    )

    # The batch frame deliberately stays a single-GPU cudf.DataFrame here.
    batches = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 10, 15], dtype="int32"),
            "batch": cudf.Series([0, 0, 1, 1], dtype="int32"),
        }
    )

    sampler.add_batches(batches, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    recovered_samples = cudf.read_parquet(samples_path)
    # The output must not contain a "map" column in this configuration.
    assert "map" not in recovered_samples.columns

    # Every submitted batch id must show up in the recovered samples.
    for b in batches["batch"].unique().values_host.tolist():
        assert b in recovered_samples["batch_id"].values_host.tolist()

    shutil.rmtree(samples_path)
@pytest.mark.mg
@pytest.mark.parametrize("mg_input", [True, False])
def test_bulk_sampler_partitions(dask_client, scratch_dir, mg_input):
    """
    Sample 3 batches with renumbering and batches_per_partition=2, then
    verify each batch's slice of the renumber map has exactly one entry
    per unique vertex sampled for that batch.
    """
    el = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    el["eid"] = el["eid"].astype("int32")
    el["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(el, npartitions=2),
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_partitions_mg")
    # Consistency: use the shared helper (as the sibling tests do) instead
    # of a manual exists()/rmtree()/makedirs() sequence.
    create_directory_with_overwrite(samples_path)

    bs = BulkSampler(
        batch_size=3,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
        batches_per_partition=2,
        renumber=True,
    )

    batches = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 6, 10, 15, 17, 18, 9, 23], dtype="int32"),
            "batch": cudf.Series([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype="int32"),
        }
    )

    if mg_input:
        batches = dask_cudf.from_cudf(batches, npartitions=4)

    bs.add_batches(batches, start_col_name="start", batch_col_name="batch")
    bs.flush()

    for file in os.listdir(samples_path):
        # Partition files are named "batch=<first>-<last>.parquet".
        start_batch_id, end_batch_id = [
            int(x) for x in re.match(r"batch=([0-9]+)-([0-9]+).parquet", file).groups()
        ]

        recovered_samples = cudf.read_parquet(os.path.join(samples_path, file))
        recovered_map = recovered_samples.map
        recovered_samples = recovered_samples.drop("map", axis=1).dropna()

        for current_batch_id in range(start_batch_id, end_batch_id + 1):
            # The leading entries of the map column are offsets into the
            # per-batch renumber sections that follow.
            map_start_ix = recovered_map.iloc[current_batch_id - start_batch_id]
            map_end_ix = recovered_map.iloc[current_batch_id - start_batch_id + 1]
            map_current_batch = recovered_map.iloc[map_start_ix:map_end_ix]
            n_unique = cudf.concat(
                [
                    recovered_samples[
                        recovered_samples.batch_id == current_batch_id
                    ].sources,
                    recovered_samples[
                        recovered_samples.batch_id == current_batch_id
                    ].destinations,
                ]
            ).nunique()

            assert len(map_current_batch) == n_unique

    # Clean up the scratch directory, consistent with the sibling tests.
    shutil.rmtree(samples_path)
@pytest.mark.mg
def test_bulk_sampler_empty_batches(dask_client, scratch_dir):
    """Batches whose later hops produce no edges must be handled cleanly
    and must not emit extra output partitions."""
    edge_ddf = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 0, 1, 2, 3, 4, 5, 6, 4, 4],
                "dst": [3, 2, 0, 7, 8, 9, 1, 2, 8, 1],
            }
        ),
        npartitions=2,
    )

    seed_ddf = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "start": [0, 1, 2, 7, 8, 9, 3, 2, 7],
                "batch": cudf.Series([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype="int32"),
            }
        ),
        npartitions=2,
    )

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(edge_ddf, source="src", destination="dst")

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_empty_batches")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=3,
        output_path=samples_path,
        graph=G,
        fanout_vals=[-1, -1],
        with_replacement=False,
        batches_per_partition=6,
        renumber=False,
    )
    sampler.add_batches(seed_ddf, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    # All surviving batches fit into a single output partition file.
    assert len(os.listdir(samples_path)) == 1

    result_df = cudf.read_parquet(os.path.join(samples_path, "batch=0-1.parquet"))

    assert result_df[
        (result_df.batch_id == 0) & (result_df.hop_id == 0)
    ].destinations.sort_values().values_host.tolist() == [0, 2, 3, 7]

    assert result_df[
        (result_df.batch_id == 0) & (result_df.hop_id == 1)
    ].destinations.sort_values().values_host.tolist() == [2, 3, 7, 8]

    assert result_df[
        (result_df.batch_id == 1) & (result_df.hop_id == 0)
    ].destinations.sort_values().values_host.tolist() == [7, 8]

    # Batch 1's second hop is empty, and no batch id past 1 is emitted.
    assert len(result_df[(result_df.batch_id == 1) & (result_df.hop_id == 1)]) == 0
    assert result_df.batch_id.max() == 1

    shutil.rmtree(samples_path)
@pytest.mark.mg
@pytest.mark.parametrize("mg_input", [True, False])
def test_bulk_sampler_csr(dask_client, scratch_dir, mg_input):
    """Sample with CSR compression + renumbering and sanity-check the
    major offsets of every output partition."""
    n_workers = len(dask_client.scheduler_info()["workers"])
    edge_ddf = dask_cudf.from_cudf(
        email_Eu_core.get_edgelist(), npartitions=n_workers * 2
    )

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(edge_ddf, source="src", destination="dst")

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_csr")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=7,
        output_path=samples_path,
        graph=G,
        fanout_vals=[5, 4, 3],
        with_replacement=False,
        batches_per_partition=7,
        renumber=True,
        use_legacy_names=False,
        compression="CSR",
        compress_per_hop=True,
        prior_sources_behavior="carryover",
        deduplicate_sources=True,
        include_hop_column=False,
    )

    seeds = G.select_random_vertices(62, 1000)
    # Assign 7 consecutive seeds per batch id (the last batch may be short).
    batch_ids = cudf.Series(
        cupy.repeat(cupy.arange(int(1000 / 7) + 1, dtype="int32"), 7)[:1000]
    ).sort_values()

    batch_df = cudf.DataFrame(
        {
            "seed": seeds.compute().values,
            "batch": batch_ids,
        }
    )
    if mg_input:
        batch_df = dask_cudf.from_cudf(batch_df, npartitions=2)

    sampler.add_batches(batch_df, start_col_name="seed", batch_col_name="batch")
    sampler.flush()

    assert len(os.listdir(samples_path)) == 21

    for file in os.listdir(samples_path):
        df = cudf.read_parquet(os.path.join(samples_path, file))
        # The CSR major offsets must span exactly the rows in the file.
        assert df.major_offsets.dropna().iloc[-1] - df.major_offsets.iloc[0] == len(df)

    shutil.rmtree(samples_path)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_random_walks.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import networkx as nx
import cudf
import cugraph
from cudf.testing import assert_series_equal
from cugraph.utilities import ensure_cugraph_obj_for_nx
from cugraph.testing import SMALL_DATASETS, DEFAULT_DATASETS
# =============================================================================
# Parameters
# =============================================================================
# Graph-shape options exercised by the parametrized tests below.
DIRECTED_GRAPH_OPTIONS = [False, True]
WEIGHTED_GRAPH_OPTIONS = [False, True]
# Wrap each dataset as an individual pytest param.
DATASETS = [pytest.param(d) for d in DEFAULT_DATASETS]
SMALL_DATASETS = [pytest.param(d) for d in SMALL_DATASETS]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup hook: force a full garbage-collection pass."""
    gc.collect()
def calc_random_walks(G, max_depth=None, use_padding=False, legacy_result_type=True):
    """
    Run uniform random walks from 1-6 randomly selected start vertices.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        The graph can be either directed or undirected.  A networkx graph
        is converted via ensure_cugraph_obj_for_nx first; weights in the
        graph are ignored by the uniform walk.
    max_depth : int
        The maximum depth of the random walks.
    use_padding : bool
        If True, padded paths are returned, else coalesced paths are
        returned.
    legacy_result_type : bool
        Forwarded to cugraph.random_walks; selects the legacy format of
        the third element of the returned path data.

    Returns
    -------
    path_data : tuple
        (vertex_paths, edge_weight_paths, vertex_path_sizes) as returned
        by cugraph.random_walks: the vertices of the walks, the weights
        of the traversed edges, and the path sizes (coalesced case).
    start_vertices
        The randomly selected walk roots.
    """
    assert G is not None
    # Normalize a networkx input into a cugraph graph.
    G, _ = ensure_cugraph_obj_for_nx(G, nx_weight_attr="wgt")
    # Randomize the number of roots on every call.
    k = random.randint(1, 6)
    random_walks_type = "uniform"
    start_vertices = G.select_random_vertices(num_vertices=k)
    print("\nstart_vertices is \n", start_vertices)
    vertex_paths, edge_weights, vertex_path_sizes = cugraph.random_walks(
        G, random_walks_type, start_vertices, max_depth, use_padding, legacy_result_type
    )
    return (vertex_paths, edge_weights, vertex_path_sizes), start_vertices
def check_random_walks(path_data, seeds, G):
    """
    Validate coalesced random-walk output.

    Checks that each path begins at its corresponding seed and that the
    traversed (src, dst) pair exists in the graph's input edge list.
    Asserts on any violation.
    """
    invalid_edge = 0
    invalid_seeds = 0
    offsets_idx = 0
    next_path_idx = 0
    v_paths = path_data[0]
    df_G = G.input_df
    # path_data[2] holds the per-seed path sizes (coalesced layout).
    sizes = path_data[2].to_numpy().tolist()
    for s in sizes:
        for i in range(next_path_idx, next_path_idx + s - 1):
            src, dst = v_paths.iloc[i], v_paths.iloc[i + 1]
            if i == next_path_idx and src != seeds[offsets_idx]:
                invalid_seeds += 1
                print(
                    "[ERR] Invalid seed: "
                    " src {} != src {}".format(src, seeds[offsets_idx])
                )
        offsets_idx += 1
        next_path_idx += s
        # NOTE(review): this edge-existence check sits outside the inner
        # loop, so only the final (src, dst) pair of each path is checked
        # against the edge list — confirm whether every hop was intended
        # to be validated here.
        exp_edge = df_G.loc[
            (df_G["src"] == (src)) & (df_G["dst"] == (dst))
        ].reset_index(drop=True)
        if len(exp_edge) == 0:
            print(
                "[ERR] Invalid edge: " "There is no edge src {} dst {}".format(src, dst)
            )
            invalid_edge += 1
    assert invalid_edge == 0
    assert invalid_seeds == 0
def check_random_walks_padded(G, path_data, seeds, max_depth, legacy_result_type=True):
    """
    Validate padded random-walk output.

    Each walk occupies exactly ``max_depth`` slots of the vertex paths
    (padded with -1 past its end) and ``max_depth - 1`` slots of the edge
    weight paths.  Checks seed positions, edge existence, edge weights
    (when the graph is weighted), and the format of the third result
    element for both legacy and non-legacy result types.  Asserts on any
    violation.
    """
    invalid_edge = 0
    invalid_seeds = 0
    invalid_edge_wgt = 0
    v_paths = path_data[0]
    e_wgt_paths = path_data[1]
    e_wgt_idx = 0
    # Normalize a networkx input into a cugraph graph.
    G, _ = ensure_cugraph_obj_for_nx(G, nx_weight_attr="wgt")
    df_G = G.input_df
    if "weight" in df_G.columns:
        df_G = df_G.rename(columns={"weight": "wgt"})
    # Padded layout: every seed owns exactly max_depth vertex slots.
    total_depth = (max_depth) * len(seeds)
    for i in range(total_depth - 1):
        vertex_1, vertex_2 = v_paths.iloc[i], v_paths.iloc[i + 1]
        # Every max_depth'th vertex in 'v_paths' is a seed
        # instead of 'seeds[i // (max_depth)]', could have just pop the first element
        # of the seeds array once there is a match and compare it to 'vertex_1'
        if i % (max_depth) == 0 and vertex_1 != seeds[i // (max_depth)]:
            invalid_seeds += 1
            print(
                "[ERR] Invalid seed: "
                " src {} != src {}".format(vertex_1, seeds[i // (max_depth)])
            )
        if (i % (max_depth)) != (max_depth - 1):
            # These are the edges
            src = vertex_1
            dst = vertex_2
            if src != -1 and dst != -1:
                # check for valid edge.
                edge = df_G.loc[
                    (df_G["src"] == (src)) & (df_G["dst"] == (dst))
                ].reset_index(drop=True)
                if len(edge) == 0:
                    print(
                        "[ERR] Invalid edge: "
                        "There is no edge src {} dst {}".format(src, dst)
                    )
                    invalid_edge += 1
                else:
                    # check valid edge wgt
                    if G.is_weighted():
                        expected_wgt = edge["wgt"].iloc[0]
                        result_wgt = e_wgt_paths.iloc[e_wgt_idx]
                        if expected_wgt != result_wgt:
                            print(
                                "[ERR] Invalid edge wgt: "
                                "The edge src {} dst {} has wgt {} but got {}".format(
                                    src, dst, expected_wgt, result_wgt
                                )
                            )
                            invalid_edge_wgt += 1
                e_wgt_idx += 1
            if src != -1 and dst == -1:
                # ensure there is no outgoing edges from 'src'
                assert G.out_degree([src])["degree"].iloc[0] == 0
    assert invalid_seeds == 0
    assert invalid_edge == 0
    assert len(v_paths) == (max_depth) * len(seeds)
    if G.is_weighted():
        assert invalid_edge_wgt == 0
        assert len(e_wgt_paths) == (max_depth - 1) * len(seeds)
    if legacy_result_type:
        # Legacy padded results carry no sizes element.
        sizes = path_data[2]
        assert sizes is None
    else:
        # Non-legacy results report the maximum path length instead.
        max_path_lenth = path_data[2]
        assert max_path_lenth == max_depth - 1
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("max_depth", [None])
def test_random_walks_invalid_max_dept(graph_file, directed, max_depth):
    """A max_depth of None must be rejected with a TypeError."""
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=directed))
    with pytest.raises(TypeError):
        calc_random_walks(G, max_depth=max_depth)
@pytest.mark.sg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
def test_random_walks_coalesced(graph_file, directed):
    """Coalesced random walks: validate the paths and the rw_path offsets."""
    depth = random.randint(2, 10)
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=directed))
    path_data, seeds = calc_random_walks(G, max_depth=depth, use_padding=False)
    check_random_walks(path_data, seeds, G)

    # Check path query output
    sizes = path_data[2]
    offsets_df = cugraph.rw_path(len(seeds), sizes)
    expected_v_offsets = [0] + sizes.cumsum()[:-1].to_numpy().tolist()
    expected_w_offsets = [0] + (sizes - 1).cumsum()[:-1].to_numpy().tolist()

    assert_series_equal(offsets_df["weight_sizes"], sizes - 1, check_names=False)
    assert offsets_df["vertex_offsets"].to_numpy().tolist() == expected_v_offsets
    assert offsets_df["weight_offsets"].to_numpy().tolist() == expected_w_offsets
@pytest.mark.sg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
def test_random_walks_padded_0(graph_file, directed):
    """Padded random walks, exercised for both result-type flavors."""
    depth = random.randint(2, 10)
    print("max_depth is ", depth)
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=directed))

    path_data, seeds = calc_random_walks(G, max_depth=depth, use_padding=True)
    check_random_walks_padded(G, path_data, seeds, depth)

    # test for 'legacy_result_type=False'
    path_data, seeds = calc_random_walks(
        G, max_depth=depth, use_padding=True, legacy_result_type=False
    )
    # Non 'legacy_result_type' has an extra edge 'path_data'
    check_random_walks_padded(G, path_data, seeds, depth + 1, legacy_result_type=False)
@pytest.mark.sg
@pytest.mark.cugraph_ops
def test_random_walks_padded_1():
    """Padded random walks on a tiny hand-built weighted digraph."""
    depth = random.randint(2, 10)
    edges = cudf.DataFrame(
        {
            "src": [1, 2, 4, 7, 3],
            "dst": [5, 4, 1, 5, 2],
            "wgt": [0.4, 0.5, 0.6, 0.7, 0.8],
        }
    )
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        edges, source="src", destination="dst", edge_attr="wgt", renumber=True
    )
    path_data, seeds = calc_random_walks(G, max_depth=depth, use_padding=True)
    check_random_walks_padded(G, path_data, seeds, depth)
@pytest.mark.sg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
def test_random_walks_nx(graph_file):
    """Random walks must also accept a networkx graph as input."""
    cu_G = graph_file.get_graph(create_using=cugraph.Graph(directed=True))

    # Rebuild the same graph as a networkx DiGraph.
    edge_df = cu_G.to_pandas_edgelist()
    Gnx = nx.from_pandas_edgelist(
        edge_df,
        source=cu_G.source_columns,
        target=cu_G.destination_columns,
        edge_attr=cu_G.weight_column,
        create_using=nx.DiGraph(),
    )

    depth = random.randint(2, 10)
    path_data, seeds = calc_random_walks(Gnx, max_depth=depth, use_padding=True)
    check_random_walks_padded(Gnx, path_data, seeds, depth)
"""@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.sg
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
def test_random_walks(
graph_file,
directed
):
max_depth = random.randint(2, 10)
df_G = utils.read_csv_file(graph_file)
df_G.rename(
columns={"0": "src", "1": "dst", "2": "weight"}, inplace=True)
df_G['src_0'] = df_G['src'] + 1000
df_G['dst_0'] = df_G['dst'] + 1000
if directed:
G = cugraph.Graph(directed=True)
else:
G = cugraph.Graph()
G.from_cudf_edgelist(df_G, source=['src', 'src_0'],
destination=['dst', 'dst_0'],
edge_attr="weight")
k = random.randint(1, 10)
start_vertices = random.sample(G.nodes().to_numpy().tolist(), k)
seeds = cudf.DataFrame()
seeds['v'] = start_vertices
seeds['v_0'] = seeds['v'] + 1000
df, offsets = cugraph.random_walks(G, seeds, max_depth)
check_random_walks(df, offsets, seeds, df_G)
"""
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import os
import pytest
import pandas
import cupy
import cudf
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing import UNDIRECTED_DATASETS
from cugraph.dask import uniform_neighbor_sample
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.datasets import email_Eu_core, small_tree
from pylibcugraph.testing.utils import gen_fixture_params_product
# If the rapids-pytest-benchmark plugin is installed, the "gpubenchmark"
# fixture will be available automatically. Check that this fixture is available
# by trying to import rapids_pytest_benchmark, and if that fails, set
# "gpubenchmark" to the standard "benchmark" fixture provided by
# pytest-benchmark.
try:
    import rapids_pytest_benchmark  # noqa: F401
except ImportError:
    import pytest_benchmark
    # Fall back to the plain pytest-benchmark fixture under the same name.
    gpubenchmark = pytest_benchmark.plugin.benchmark
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup hook: force a full garbage-collection pass."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# Directedness options exercised by the sampling fixtures below.
IS_DIRECTED = [True, False]
# Datasets under test: the undirected reference datasets plus email_Eu_core.
datasets = UNDIRECTED_DATASETS + [email_Eu_core]
# One fixture parameter per (dataset, directed, with_replacement,
# indices_type) combination.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    ([False, True], "with_replacement"),
    (["int32", "float32"], "indices_type"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Build the MG graph plus randomized sampling inputs for the current
    (dataset, directed, with_replacement, indices_type) combination.
    """
    combo = dict(
        zip(
            ("graph_file", "directed", "with_replacement", "indices_type"),
            request.param,
        )
    )
    indices_type = combo["indices_type"]
    csv_path = combo["graph_file"].get_path()

    edge_ddf = dask_cudf.read_csv(
        csv_path,
        chunksize=dcg.get_chunksize(csv_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", indices_type],
    )

    dg = cugraph.Graph(directed=combo["directed"])
    dg.from_dask_cudf_edgelist(
        edge_ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        store_transposed=False,
    )
    combo["MGGraph"] = dg

    # sample k vertices from the cuGraph graph
    k = random.randint(1, 3)
    vertices = (
        dask_cudf.concat([dg.input_df["src"], dg.input_df["dst"]])
        .drop_duplicates()
        .compute()
    )
    start_list = vertices.sample(k).astype("int32")

    # Generate a random fanout_vals list of length random(1, k)
    fanout_vals = [random.randint(1, k) for _ in range(random.randint(1, k))]

    # These prints are for debugging purposes since the vertices and the
    # fanout_vals are randomly sampled/chosen
    print("\nstart_list: \n", start_list)
    print("fanout_vals: ", fanout_vals)

    combo["start_list"] = start_list
    combo["fanout_vals"] = fanout_vals

    return combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
@pytest.mark.cugraph_ops
def test_mg_uniform_neighbor_sample_simple(dask_client, input_combo):
    """Validate MG uniform_neighbor_sample against the input edgelist.

    Checks that every sampled edge exists in the input dataframe, that the
    returned "indices" dtype matches the parametrized one, and that every
    start vertex with at least one outgoing edge appears in the results.
    """
    dg = input_combo["MGGraph"]
    input_df = dg.input_df
    result_nbr = uniform_neighbor_sample(
        dg,
        input_combo["start_list"],
        input_combo["fanout_vals"],
        with_replacement=input_combo["with_replacement"],
    )
    # multi edges are dropped to easily verify that each edge in the
    # results is present in the input dataframe
    result_nbr = result_nbr.drop_duplicates()
    # FIXME: The indices are not included in the comparison because garbage
    # value are intermittently retuned. This observation is observed when
    # passing float weights
    # Inner-join on (src, dst); every sampled edge should find a match.
    join = result_nbr.merge(
        input_df, left_on=[*result_nbr.columns[:2]], right_on=[*input_df.columns[:2]]
    )
    if len(result_nbr) != len(join):
        # A right join exposes sampled edges with no counterpart in the input.
        join2 = input_df.merge(
            result_nbr,
            how="right",
            left_on=[*input_df.columns],
            right_on=[*result_nbr.columns],
        )
        # The left part of the datasets shows which edge is missing from the
        # right part where the left and right part are respectively the
        # uniform-neighbor-sample results and the input dataframe.
        difference = (
            join2.sort_values([*result_nbr.columns])
            .compute()
            .to_pandas()
            .query("src.isnull()", engine="python")
        )
        invalid_edge = difference[difference.columns[:3]]
        raise Exception(
            f"\nThe edges below from uniform-neighbor-sample "
            f"are invalid\n {invalid_edge}"
        )
    # Ensure the right indices type is returned
    assert result_nbr["indices"].dtype == input_combo["indices_type"]
    sampled_vertex_result = (
        dask_cudf.concat([result_nbr["sources"], result_nbr["destinations"]])
        .drop_duplicates()
        .compute()
        .reset_index(drop=True)
    )
    sampled_vertex_result = sampled_vertex_result.to_pandas()
    start_list = input_combo["start_list"].to_pandas()
    if not set(start_list).issubset(set(sampled_vertex_result)):
        missing_vertex = set(start_list) - set(sampled_vertex_result)
        missing_vertex = list(missing_vertex)
        # compute the out-degree of the missing vertices
        out_degree = dg.out_degree(missing_vertex)
        out_degree = out_degree[out_degree.degree != 0]
        # If the missing vertices have outgoing edges, return an error
        if len(out_degree) != 0:
            missing_vertex = out_degree["vertex"].compute().to_pandas().to_list()
            raise Exception(
                f"vertex {missing_vertex} is missing from "
                f"uniform neighbor sampling results"
            )
@pytest.mark.mg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_mg_uniform_neighbor_sample_tree(dask_client, directed):
    """Sample a small tree graph and validate edges, dtypes, and start vertices."""
    input_data_path = small_tree.get_path()
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    G = cugraph.Graph(directed=directed)
    G.from_dask_cudf_edgelist(ddf, "src", "dst", "value", store_transposed=False)
    # TODO: Incomplete, include more testing for tree graph as well as
    # for larger graphs
    start_list = cudf.Series([0, 0], dtype="int32")
    fanout_vals = [4, 1, 3]
    with_replacement = True
    result_nbr = uniform_neighbor_sample(
        G, start_list, fanout_vals, with_replacement=with_replacement
    )
    result_nbr = result_nbr.drop_duplicates()
    # input_df != ddf if 'directed = False' because ddf will be symmetrized
    # internally.
    input_df = G.input_df
    # Every sampled (src, dst) pair must exist in the graph's edgelist.
    join = result_nbr.merge(
        input_df, left_on=[*result_nbr.columns[:2]], right_on=[*input_df.columns[:2]]
    )
    assert len(join) == len(result_nbr)
    # Since the validity of results have (probably) been tested at both the C++
    # and C layers, simply test that the python interface and conversions were
    # done correctly.
    assert result_nbr["sources"].dtype == "int32"
    assert result_nbr["destinations"].dtype == "int32"
    assert result_nbr["indices"].dtype == "float32"
    result_nbr_vertices = (
        dask_cudf.concat([result_nbr["sources"], result_nbr["destinations"]])
        .drop_duplicates()
        .compute()
        .reset_index(drop=True)
    )
    result_nbr_vertices = result_nbr_vertices.to_pandas()
    start_list = start_list.to_pandas()
    # The vertices in start_list must be a subsets of the vertices
    # in the result
    assert set(start_list).issubset(set(result_nbr_vertices))
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="FIXME: MG test fails on single-GPU")
@pytest.mark.cugraph_ops
def test_mg_uniform_neighbor_sample_unweighted(dask_client):
    """Full-fanout sampling on an unweighted graph returns all of seed 0's edges."""
    edgelist = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": cudf.Series([0, 1, 2, 2, 0, 1, 4, 4], dtype="int32"),
                "dst": cudf.Series([3, 2, 1, 4, 1, 3, 1, 2], dtype="int32"),
            }
        ),
        npartitions=2,
    )
    G = cugraph.Graph()
    G.from_dask_cudf_edgelist(edgelist, source="src", destination="dst")

    # A fanout of -1 selects every neighbor; vertex 0 has edges to 3 and 1.
    results = uniform_neighbor_sample(
        G, cudf.Series([0], dtype="int32"), [-1], with_replacement=True
    )

    sampled_src = results.sources.compute().to_arrow().to_pylist()
    assert sorted(sampled_src) == sorted([0, 0])

    sampled_dst = results.destinations.compute().to_arrow().to_pylist()
    assert sorted(sampled_dst) == sorted([3, 1])
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="FIXME: MG test fails on single-GPU")
@pytest.mark.cugraph_ops
def test_mg_uniform_neighbor_sample_ensure_no_duplicates(dask_client):
    """Regression test for issue #2760: seeds must be distributed to workers
    without duplication."""
    edges = cudf.DataFrame({"src": [6, 6, 6, 6], "dst": [7, 9, 10, 11]}).astype(
        "int32"
    )
    graph = cugraph.MultiGraph(directed=True)
    graph.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(edges, npartitions=2),
        source="src",
        destination="dst",
        renumber=True,
    )

    result = cugraph.dask.uniform_neighbor_sample(
        graph,
        cudf.Series([6]).astype("int32"),
        fanout_vals=[3],
        with_replacement=False,
    )
    # A duplicated seed would yield 6 sampled rows instead of 3.
    assert len(result.compute()) == 3
@pytest.mark.mg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("return_offsets", [True, False])
def test_uniform_neighbor_sample_edge_properties(dask_client, return_offsets):
    """Full-fanout, 2-hop sampling with edge properties on a 2-batch start list.

    Verifies per-batch partitioning (keep_batches_together), the optional
    offsets dataframe, and that the returned edge properties (weight, edge id,
    edge type) match the input edgelist.
    """
    n_workers = len(dask_client.scheduler_info()["workers"])
    if n_workers <= 1:
        pytest.skip("Test only valid for MG environments")
    edgelist_df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 1, 2, 3, 4, 3, 4, 2, 0, 1, 0, 2],
                "dst": [1, 2, 4, 2, 3, 4, 1, 1, 2, 3, 4, 4],
                "eid": cudf.Series(
                    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], dtype="int64"
                ),
                "etp": cudf.Series([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 0], dtype="int32"),
                "w": [0.0, 0.1, 0.2, 3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11],
            }
        ),
        npartitions=2,
    )
    G = cugraph.MultiGraph(directed=True)
    G.from_dask_cudf_edgelist(
        edgelist_df,
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )
    sampling_results = cugraph.dask.uniform_neighbor_sample(
        G,
        start_list=cudf.DataFrame(
            {
                "start": cudf.Series([0, 4], dtype="int64"),
                "batch": cudf.Series([0, 1], dtype="int32"),
            }
        ),
        fanout_vals=[-1, -1],  # full fanout for both hops
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        keep_batches_together=True,
        min_batch_id=0,
        max_batch_id=1,
        return_offsets=return_offsets,
    )
    if return_offsets:
        sampling_results, sampling_offsets = sampling_results
        # NOTE(review): batches_found tallies how often each batch id is seen
        # but is never asserted on afterwards -- confirm whether a final
        # check (e.g. each batch found exactly once) was intended.
        batches_found = {0: 0, 1: 0}
        for i in range(n_workers):
            dfp = sampling_results.get_partition(i).compute()
            if len(dfp) > 0:
                offsets_p = sampling_offsets.get_partition(i).compute()
                print(offsets_p)
                assert len(offsets_p) > 0
                # keep_batches_together => each non-empty partition holds
                # exactly one batch, identified by its first batch_id.
                if offsets_p.batch_id.iloc[0] == 1:
                    batches_found[1] += 1
                    assert offsets_p.batch_id.dropna().values_host.tolist() == [1]
                    assert offsets_p.offsets.dropna().values_host.tolist() == [
                        0,
                        len(dfp),
                    ]
                    assert sorted(dfp.sources.values_host.tolist()) == (
                        [1, 1, 3, 3, 4, 4]
                    )
                    assert sorted(dfp.destinations.values_host.tolist()) == (
                        [1, 2, 2, 3, 3, 4]
                    )
                elif offsets_p.batch_id.iloc[0] == 0:
                    batches_found[0] += 1
                    assert offsets_p.batch_id.dropna().values_host.tolist() == [0]
                    assert offsets_p.offsets.dropna().values_host.tolist() == [
                        0,
                        len(dfp),
                    ]
                    assert sorted(dfp.sources.values_host.tolist()) == (
                        [0, 0, 0, 1, 1, 2, 2, 2, 4, 4]
                    )
                    assert sorted(dfp.destinations.values_host.tolist()) == (
                        [1, 1, 1, 2, 2, 3, 3, 4, 4, 4]
                    )
    # Join sampled edges back to the input on edge id and verify that every
    # returned edge property matches the input edgelist.
    mdf = cudf.merge(
        sampling_results.compute(),
        edgelist_df.compute(),
        left_on="edge_id",
        right_on="eid",
    )
    assert (mdf.w == mdf.weight).all()
    assert (mdf.etp == mdf.edge_type).all()
    assert (mdf.src == mdf.sources).all()
    assert (mdf.dst == mdf.destinations).all()
    # 5 hop-0 rows (full fanout from the 2 seeds) and 11 hop-1 rows.
    assert sorted(sampling_results.compute()["hop_id"].values_host.tolist()) == [
        0,
        0,
        0,
        0,
        0,
        1,
        1,
        1,
        1,
        1,
        1,
        1,
        1,
        1,
        1,
        1,
    ]
@pytest.mark.mg
def test_uniform_neighbor_sample_edge_properties_self_loops(dask_client):
    """Sampling a graph made only of self-loops must return each loop edge,
    with its properties intact, once per hop."""
    edgelist = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [0, 1, 2],
            "eid": [2, 4, 6],
            "etp": cudf.Series([1, 1, 2], dtype="int32"),
            "w": [0.0, 0.1, 0.2],
        }
    )
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(edgelist, npartitions=2),
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )

    batch_df = cudf.DataFrame(
        {
            "start": cudf.Series([0, 1, 2], dtype="int64"),
            "batch": cudf.Series([1, 1, 1], dtype="int32"),
        }
    )
    results = cugraph.dask.uniform_neighbor_sample(
        G,
        start_list=dask_cudf.from_cudf(batch_df, npartitions=2),
        fanout_vals=[2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
    ).compute()

    # Each self-loop is sampled once in each of the two hops.
    expected = {
        "sources": [0, 0, 1, 1, 2, 2],
        "destinations": [0, 0, 1, 1, 2, 2],
        "weight": [0.0, 0.0, 0.1, 0.1, 0.2, 0.2],
        "edge_id": [2, 2, 4, 4, 6, 6],
        "edge_type": [1, 1, 1, 1, 2, 2],
        "batch_id": [1, 1, 1, 1, 1, 1],
        "hop_id": [0, 0, 0, 1, 1, 1],
    }
    for column, values in expected.items():
        assert sorted(results[column].values_host.tolist()) == values
@pytest.mark.mg
def test_uniform_neighbor_sample_hop_id_order(dask_client):
    """Hop ids must come back in non-decreasing order within each partition.

    Fix: the test is marked @pytest.mark.mg and calls cugraph.dask, but it did
    not request the ``dask_client`` fixture, so it silently relied on a client
    left behind by a previously-run test. Request the fixture explicitly, as
    every other MG test in this module does.
    """
    df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 1, 2, 3, 3, 6],
                "dst": [2, 3, 4, 5, 6, 7],
            }
        ),
        npartitions=2,
    )
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(df, source="src", destination="dst")
    sampling_results = cugraph.dask.uniform_neighbor_sample(
        G,
        cudf.Series([0, 1], dtype="int64"),
        fanout_vals=[2, 2, 2],
        with_replacement=False,
        with_edge_properties=True,
    )
    # Within each partition the rows must already be sorted by hop id.
    for p in range(sampling_results.npartitions):
        sampling_results_p = sampling_results.get_partition(p).compute()
        assert (
            sorted(sampling_results_p.hop_id.values_host.tolist())
            == sampling_results_p.hop_id.values_host.tolist()
        )
@pytest.mark.mg
def test_uniform_neighbor_sample_hop_id_order_multi_batch(dask_client):
    """Hop ids must be in non-decreasing order within each batch of a partition.

    Fix: the test is marked @pytest.mark.mg and calls cugraph.dask, but it did
    not request the ``dask_client`` fixture, so it silently relied on a client
    left behind by a previously-run test. Request the fixture explicitly, as
    every other MG test in this module does.
    """
    df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 1, 2, 3, 3, 6],
                "dst": [2, 3, 4, 5, 6, 7],
            }
        ),
        npartitions=2,
    )
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(df, source="src", destination="dst")
    sampling_results = cugraph.dask.uniform_neighbor_sample(
        G,
        dask_cudf.from_cudf(
            cudf.DataFrame(
                {
                    "start": cudf.Series([0, 1], dtype="int64"),
                    "batch": cudf.Series([0, 1], dtype="int32"),
                }
            ),
            npartitions=2,
        ),
        fanout_vals=[2, 2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
    )
    # Check ordering separately for each (partition, batch) pair.
    for p in range(sampling_results.npartitions):
        sampling_results_p = sampling_results.get_partition(p)
        if len(sampling_results_p) > 0:
            for b in range(2):
                sampling_results_pb = sampling_results_p[
                    sampling_results_p.batch_id == b
                ].compute()
                assert (
                    sorted(sampling_results_pb.hop_id.values_host.tolist())
                    == sampling_results_pb.hop_id.values_host.tolist()
                )
@pytest.mark.mg
@pytest.mark.parametrize("with_replacement", [True, False])
@pytest.mark.skipif(
    len(os.getenv("DASK_WORKER_DEVICES", "0").split(",")) < 2,
    reason="too few workers to test",
)
def test_uniform_neighbor_edge_properties_sample_small_start_list(
    dask_client, with_replacement
):
    """Smoke test: a start list smaller than the worker count must not error.

    Fix: with_batch_ids=True requires a start list with "start" and "batch"
    columns, i.e. a DataFrame; the previous code passed a cudf.Series built
    from a dict of Series, which does not produce the two-column layout that
    every other test in this module uses.
    """
    df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 1, 2],
                "dst": [0, 1, 2],
                "eid": [2, 4, 6],
                "etp": cudf.Series([1, 1, 2], dtype="int32"),
                "w": [0.0, 0.1, 0.2],
            }
        ),
        npartitions=2,
    )
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        df,
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )
    # Result intentionally unused; only checks the call completes cleanly.
    cugraph.dask.uniform_neighbor_sample(
        G,
        start_list=dask_cudf.from_cudf(
            cudf.DataFrame(
                {
                    "start": cudf.Series([0]),
                    "batch": cudf.Series([10], dtype="int32"),
                }
            ),
            npartitions=1,
        ),
        fanout_vals=[10, 25],
        with_replacement=with_replacement,
        with_edge_properties=True,
        with_batch_ids=True,
    )
@pytest.mark.mg
def test_uniform_neighbor_sample_without_dask_inputs(dask_client):
    """uniform_neighbor_sample must accept a plain (non-dask) cudf start list
    on an MG graph and return the expected self-loop samples."""
    edgelist = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [0, 1, 2],
            "eid": [2, 4, 6],
            "etp": cudf.Series([1, 1, 2], dtype="int32"),
            "w": [0.0, 0.1, 0.2],
        }
    )
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        dask_cudf.from_cudf(edgelist, npartitions=2),
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )

    # The start list is deliberately a cudf.DataFrame, not dask_cudf.
    results = cugraph.dask.uniform_neighbor_sample(
        G,
        start_list=cudf.DataFrame(
            {
                "start": cudf.Series([0, 1, 2]),
                "batch": cudf.Series([1, 1, 1], dtype="int32"),
            }
        ),
        fanout_vals=[2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
    ).compute()

    expected = {
        "sources": [0, 0, 1, 1, 2, 2],
        "destinations": [0, 0, 1, 1, 2, 2],
        "weight": [0.0, 0.0, 0.1, 0.1, 0.2, 0.2],
        "edge_id": [2, 2, 4, 4, 6, 6],
        "edge_type": [1, 1, 1, 1, 2, 2],
        "batch_id": [1, 1, 1, 1, 1, 1],
        "hop_id": [0, 0, 0, 1, 1, 1],
    }
    for column, values in expected.items():
        assert sorted(results[column].values_host.tolist()) == values
@pytest.mark.mg
@pytest.mark.parametrize("dataset", datasets)
@pytest.mark.parametrize("input_df", [cudf.DataFrame, dask_cudf.DataFrame])
@pytest.mark.parametrize("max_batches", [2, 8, 16, 32])
def test_uniform_neighbor_sample_batched(dask_client, dataset, input_df, max_batches):
    """Sample all vertices, randomly assigned to max_batches batches, and check
    each batch's hop-0 sources are a subset of that batch's start vertices.
    """
    num_workers = len(dask_client.scheduler_info()["workers"])
    df = dataset.get_edgelist()
    df["eid"] = cupy.arange(len(df), dtype=df["src"].dtype)
    df["etp"] = cupy.zeros_like(df["eid"].to_cupy())
    ddf = dask_cudf.from_cudf(df, npartitions=num_workers)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )
    input_vertices = dask_cudf.concat([df.src, df.dst]).unique().compute()
    assert isinstance(input_vertices, cudf.Series)
    input_vertices.name = "start"
    # Shuffle the vertices before attaching random batch ids.
    input_vertices.index = cupy.random.permutation(len(input_vertices))
    input_vertices = input_vertices.to_frame().reset_index(drop=True)
    input_vertices["batch"] = cudf.Series(
        cupy.random.randint(0, max_batches, len(input_vertices)), dtype="int32"
    )
    # Parametrized: start list either stays cudf or is converted to dask_cudf.
    if input_df == dask_cudf.DataFrame:
        input_vertices = dask_cudf.from_cudf(input_vertices, npartitions=num_workers)
    sampling_results = cugraph.dask.uniform_neighbor_sample(
        G,
        start_list=input_vertices,
        fanout_vals=[5, 5],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
    )
    for batch_id in range(max_batches):
        output_starts_per_batch = (
            sampling_results[
                (sampling_results.batch_id == batch_id) & (sampling_results.hop_id == 0)
            ]
            .sources.nunique()
            .compute()
        )
        input_starts_per_batch = len(input_vertices[input_vertices.batch == batch_id])
        # Should be <= to account for starts without outgoing edges
        assert output_starts_per_batch <= input_starts_per_batch
@pytest.mark.mg
def test_uniform_neighbor_sample_exclude_sources_basic(dask_client):
    """prior_sources_behavior="exclude" on a small hand-built multigraph.

    A vertex used as a source in one hop must never reappear as a source in
    any later hop.
    """
    df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 4, 1, 2, 3, 5, 4, 1, 0],
                "dst": [1, 1, 2, 4, 3, 1, 5, 0, 2],
                "eid": [9, 8, 7, 6, 5, 4, 3, 2, 1],
            }
        ),
        npartitions=1,
    )
    G = cugraph.MultiGraph(directed=True)
    G.from_dask_cudf_edgelist(df, source="src", destination="dst", edge_id="eid")
    sampling_results = (
        cugraph.dask.uniform_neighbor_sample(
            G,
            cudf.DataFrame(
                {
                    "seed": cudf.Series([0, 4, 1], dtype="int64"),
                    "batch": cudf.Series([1, 1, 1], dtype="int32"),
                }
            ),
            [2, 3, 3],
            with_replacement=False,
            with_edge_properties=True,
            with_batch_ids=True,
            random_state=62,
            prior_sources_behavior="exclude",
        )
        .sort_values(by="hop_id")
        .compute()
    )
    # Expected hop-0 destinations for seeds {0, 4, 1} with random_state=62.
    expected_hop_0 = [1, 2, 1, 5, 2, 0]
    assert sorted(
        sampling_results[sampling_results.hop_id == 0].destinations.values_host.tolist()
    ) == sorted(expected_hop_0)
    # The hop-0 seeds must not be sources in any subsequent hop.
    next_sources = set(
        sampling_results[sampling_results.hop_id > 0].sources.values_host.tolist()
    )
    for v in [0, 4, 1]:
        assert v not in next_sources
    # Likewise, hop-1 sources must not re-appear as sources in hop 2.
    next_sources = set(
        sampling_results[sampling_results.hop_id > 1].sources.values_host.tolist()
    )
    for v in sampling_results[
        sampling_results.hop_id == 1
    ].sources.values_host.tolist():
        assert v not in next_sources
@pytest.mark.mg
def test_uniform_neighbor_sample_exclude_sources_email_eu_core(dask_client):
    """With prior_sources_behavior="exclude", a vertex used as a source in one
    hop must never be a source again in any later hop."""
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=8)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")

    results = cugraph.dask.uniform_neighbor_sample(
        G,
        G.select_random_vertices(62, int(0.001 * len(el))),
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        prior_sources_behavior="exclude",
    ).compute()

    for hop in range(5):
        hop_sources = set(
            results[results.hop_id == hop].sources.values_host.tolist()
        )
        later_sources = set(
            results[results.hop_id > hop].sources.values_host.tolist()
        )
        # No overlap allowed between this hop's sources and later hops'.
        assert hop_sources.isdisjoint(later_sources)
@pytest.mark.mg
def test_uniform_neighbor_sample_carry_over_sources_basic(dask_client):
    """prior_sources_behavior="carryover" on a small hand-built multigraph.

    Every source of a hop must also appear as a source of the next hop.
    """
    df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 4, 1, 2, 3, 5, 4, 1, 0, 6],
                "dst": [1, 1, 2, 4, 6, 1, 5, 0, 2, 2],
                "eid": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
            }
        ),
        npartitions=4,
    )
    G = cugraph.MultiGraph(directed=True)
    G.from_dask_cudf_edgelist(df, source="src", destination="dst", edge_id="eid")
    sampling_results = (
        cugraph.dask.uniform_neighbor_sample(
            G,
            cudf.DataFrame(
                {
                    "seed": cudf.Series([0, 4, 3], dtype="int64"),
                    "batch": cudf.Series([1, 1, 1], dtype="int32"),
                }
            ),
            [2, 3, 3],
            with_replacement=False,
            with_edge_properties=True,
            with_batch_ids=True,
            random_state=62,
            prior_sources_behavior="carryover",
        )
        .sort_values(by="hop_id")[["sources", "destinations", "hop_id"]]
        .compute()
    )
    # Vertex 6 is first reached in hop 1 and must be carried over; with
    # random_state=62 it contributes exactly two hop-2 rows.
    assert (
        len(
            sampling_results[
                (sampling_results.hop_id == 2) & (sampling_results.sources == 6)
            ]
        )
        == 2
    )
    # Every source of hop h must also be a source of hop h+1.
    for hop in range(2):
        sources_current_hop = set(
            sampling_results[
                sampling_results.hop_id == hop
            ].sources.values_host.tolist()
        )
        sources_next_hop = set(
            sampling_results[
                sampling_results.hop_id == (hop + 1)
            ].sources.values_host.tolist()
        )
        for s in sources_current_hop:
            assert s in sources_next_hop
@pytest.mark.mg
def test_uniform_neighbor_sample_carry_over_sources_email_eu_core(dask_client):
    """With prior_sources_behavior="carryover", every source of hop h must
    also appear as a source of hop h+1."""
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=8)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")

    results = cugraph.dask.uniform_neighbor_sample(
        G,
        G.select_random_vertices(62, int(0.001 * len(el))),
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        prior_sources_behavior="carryover",
    ).compute()

    for hop in range(4):
        hop_sources = set(
            results[results.hop_id == hop].sources.values_host.tolist()
        )
        next_hop_sources = set(
            results[results.hop_id == (hop + 1)].sources.values_host.tolist()
        )
        assert hop_sources.issubset(next_hop_sources)
@pytest.mark.mg
def test_uniform_neighbor_sample_deduplicate_sources_email_eu_core(dask_client):
    """With deduplicate_sources=True, no source may produce more rows in a hop
    than that hop's fanout allows."""
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=8)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")

    results = cugraph.dask.uniform_neighbor_sample(
        G,
        G.select_random_vertices(62, int(0.001 * len(el))),
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
    ).compute()

    # The fanout at hop h is 5 - h, which bounds each source's row count.
    for hop in range(5):
        per_source_counts = (
            results[results.hop_id == hop]
            .sources.value_counts()
            .values_host.tolist()
        )
        assert all(count <= 5 - hop for count in per_source_counts)
@pytest.mark.mg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_renumber(dask_client, hops):
    """renumber=True must return a renumber map covering exactly the distinct
    hop-0 sources plus all destinations of a single batch."""
    # FIXME This test is not very good because there is a lot of
    # non-deterministic behavior that still exists despite passing
    # a random seed. Right now, there are tests in cuGraph-DGL and
    # cuGraph-PyG that provide better coverage, but a better test
    # should eventually be written to augment or replace this one.
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=4)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")
    seeds = G.select_random_vertices(62, int(0.0001 * len(el)))
    sampling_results_renumbered, renumber_map = cugraph.dask.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=True,
        random_state=62,
        keep_batches_together=True,
        min_batch_id=0,
        max_batch_id=0,
    )
    sampling_results_renumbered = sampling_results_renumbered.compute()
    renumber_map = renumber_map.compute()
    sources_hop_0 = sampling_results_renumbered[
        sampling_results_renumbered.hop_id == 0
    ].sources
    # Single batch (min/max batch id 0), so all map rows belong to batch 0.
    assert (renumber_map.batch_id == 0).all()
    # The map must have one entry per distinct sampled vertex.
    assert (
        renumber_map.map.nunique()
        == cudf.concat(
            [sources_hop_0, sampling_results_renumbered.destinations]
        ).nunique()
    )
@pytest.mark.mg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_offset_renumber(dask_client, hops):
    """Compare renumbered+offset output against an unrenumbered run.

    The renumber map must enumerate hop-0 sources followed by destinations in
    hop order, and the offsets frame must describe the single batch.
    """
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=4)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")
    seeds = G.select_random_vertices(62, int(0.0001 * len(el)))
    # Reference run without renumbering.
    (
        sampling_results_unrenumbered,
        offsets_unrenumbered,
    ) = cugraph.dask.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=False,
        return_offsets=True,
        random_state=62,
    )
    sampling_results_unrenumbered = sampling_results_unrenumbered.compute()
    offsets_unrenumbered = offsets_unrenumbered.compute()
    # Same sampling (same random_state) with renumbering enabled.
    (
        sampling_results_renumbered,
        offsets_renumbered,
        renumber_map,
    ) = cugraph.dask.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=True,
        keep_batches_together=True,
        min_batch_id=0,
        max_batch_id=0,
        return_offsets=True,
        random_state=62,
    )
    # can't use compute() since empty batches still get a partition
    # Find the one partition that actually holds the batch.
    n_workers = len(dask_client.scheduler_info()["workers"])
    for p in range(n_workers):
        partition = offsets_renumbered.get_partition(p).compute()
        if not pandas.isna(partition.batch_id.iloc[0]):
            break
    sampling_results_renumbered = sampling_results_renumbered.get_partition(p).compute()
    offsets_renumbered = offsets_renumbered.get_partition(p).compute()
    renumber_map = renumber_map.get_partition(p).compute()
    sources_hop_0 = sampling_results_unrenumbered[
        sampling_results_unrenumbered.hop_id == 0
    ].sources
    # The leading section of the map for each hop must cover the hop-0
    # sources plus all destinations seen up to and including that hop.
    for hop in range(len(hops)):
        destinations_hop = sampling_results_unrenumbered[
            sampling_results_unrenumbered.hop_id <= hop
        ].destinations
        expected_renumber_map = cudf.concat([sources_hop_0, destinations_hop]).unique()
        assert sorted(expected_renumber_map.values_host.tolist()) == sorted(
            renumber_map.map[0 : len(expected_renumber_map)].values_host.tolist()
        )
    # A single batch yields exactly one [start, end] offset pair.
    renumber_map_offsets = offsets_renumbered.renumber_map_offsets.dropna()
    assert len(renumber_map_offsets) == 2
    assert renumber_map_offsets.iloc[0] == 0
    assert renumber_map_offsets.iloc[-1] == len(renumber_map)
    assert len(offsets_renumbered) == 2
@pytest.mark.mg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
@pytest.mark.parametrize("seed", [62, 66, 68])
def test_uniform_neighbor_sample_csr_csc_global(dask_client, hops, seed):
    """CSR-compressed (global) output: expanding major_offsets back into COO
    edges must yield only edges present in the input graph."""
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=4)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")
    seeds = G.select_random_vertices(seed, int(0.0001 * len(el)))
    sampling_results, offsets, renumber_map = cugraph.dask.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        # carryover not valid because C++ sorts on (hop,src)
        prior_sources_behavior="exclude",
        renumber=True,
        return_offsets=True,
        random_state=seed,
        use_legacy_names=False,
        compress_per_hop=False,
        compression="CSR",
        include_hop_column=False,
        keep_batches_together=True,
        min_batch_id=0,
        max_batch_id=0,
    )
    # can't use compute() since empty batches still get a partition
    # Find the one partition that actually holds the batch.
    n_workers = len(dask_client.scheduler_info()["workers"])
    for p in range(n_workers):
        partition = offsets.get_partition(p).compute()
        if not pandas.isna(partition.batch_id.iloc[0]):
            break
    sampling_results = sampling_results.get_partition(p).compute()
    offsets = offsets.get_partition(p).compute()
    renumber_map = renumber_map.get_partition(p).compute()
    # Expand CSR: row i spans major_offsets[i]..major_offsets[i+1] minors.
    major_offsets = sampling_results["major_offsets"].dropna().values
    majors = cudf.Series(cupy.arange(len(major_offsets) - 1))
    majors = majors.repeat(cupy.diff(major_offsets))
    minors = sampling_results["minors"].dropna()
    assert len(majors) == len(minors)
    # Map renumbered ids back to the original vertex ids.
    majors = renumber_map.map.iloc[majors]
    minors = renumber_map.map.iloc[minors]
    # Every reconstructed (major, minor) pair must be a real input edge.
    for i in range(len(majors)):
        assert 1 == len(el[(el.src == majors.iloc[i]) & (el.dst == minors.iloc[i])])
@pytest.mark.mg
@pytest.mark.parametrize("seed", [62, 66, 68])
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_csr_csc_local(dask_client, hops, seed):
    """Per-hop CSR compression: expanding each hop's major_offsets slice must
    yield only edges present in the input graph."""
    el = dask_cudf.from_cudf(email_Eu_core.get_edgelist(), npartitions=4)
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(el, source="src", destination="dst")
    seeds = dask_cudf.from_cudf(
        cudf.Series([49, 71], dtype="int32"), npartitions=1
    )  # hardcoded to ensure out-degree is high enough
    sampling_results, offsets, renumber_map = cugraph.dask.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        prior_sources_behavior="carryover",
        renumber=True,
        return_offsets=True,
        random_state=seed,
        use_legacy_names=False,
        compress_per_hop=True,
        compression="CSR",
        include_hop_column=False,
        keep_batches_together=True,
        min_batch_id=0,
        max_batch_id=0,
    )
    # can't use compute() since empty batches still get a partition
    # Find the one partition that actually holds the batch.
    n_workers = len(dask_client.scheduler_info()["workers"])
    for p in range(n_workers):
        partition = offsets.get_partition(p).compute()
        if not pandas.isna(partition.batch_id.iloc[0]):
            break
    sampling_results = sampling_results.get_partition(p).compute()
    offsets = offsets.get_partition(p).compute()
    renumber_map = renumber_map.get_partition(p).compute()
    print(sampling_results)
    print(offsets)
    # With compress_per_hop=True each hop has its own CSR segment, delimited
    # by offsets.offsets; expand each segment back to COO and validate.
    for hop in range(len(hops)):
        major_offsets = sampling_results["major_offsets"].iloc[
            offsets.offsets.iloc[hop] : (offsets.offsets.iloc[hop + 1] + 1)
        ]
        minors = sampling_results["minors"].iloc[
            major_offsets.iloc[0] : major_offsets.iloc[-1]
        ]
        majors = cudf.Series(cupy.arange(len(major_offsets) - 1))
        majors = majors.repeat(cupy.diff(major_offsets))
        # Map renumbered ids back to the original vertex ids.
        majors = renumber_map.map.iloc[majors]
        minors = renumber_map.map.iloc[minors]
        for i in range(len(majors)):
            assert 1 == len(el[(el.src == majors.iloc[i]) & (el.dst == minors.iloc[i])])
@pytest.mark.mg
@pytest.mark.skip(reason="needs to be written!")
def test_uniform_neighbor_sample_dcsr_dcsc_global():
    """Placeholder for a DCSR/DCSC global-compression test (not implemented)."""
    raise NotImplementedError
@pytest.mark.mg
@pytest.mark.skip(reason="needs to be written!")
def test_uniform_neighbor_sample_dcsr_dcsc_local():
    """Placeholder for a DCSR/DCSC per-hop-compression test (not implemented)."""
    raise NotImplementedError
# =============================================================================
# Benchmarks
# =============================================================================
@pytest.mark.mg
@pytest.mark.slow
@pytest.mark.parametrize("n_samples", [1_000, 5_000, 10_000])
def bench_uniform_neighbor_sample_email_eu_core(gpubenchmark, dask_client, n_samples):
    """Benchmark a 1-hop fanout-10 sample on email-Eu-core with n_samples seeds."""
    input_data_path = email_Eu_core.get_path()
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "int32"],
    )
    dg = cugraph.Graph(directed=False)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        store_transposed=False,
    )
    # Partition the dataframe to add in chunks
    # Seeds are simply the first n_samples source vertices.
    srcs = dg.input_df["src"]
    start_list = srcs[:n_samples].compute()
    def func():
        # The benchmarked body: a single-hop sample with fanout 10.
        _ = cugraph.dask.uniform_neighbor_sample(dg, start_list, [10])
        del _
    gpubenchmark(func)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_io.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import pytest
import cupy
import cudf
from cugraph.gnn.data_loading.bulk_sampler_io import write_samples
from cugraph.utilities.utils import create_directory_with_overwrite
@pytest.mark.sg
def test_bulk_sampler_io(scratch_dir):
    """write_samples with batches_per_partition=1 must emit one parquet file
    per batch, each containing exactly that batch's rows."""
    results = cudf.DataFrame(
        {
            "sources": [0, 0, 1, 2, 2, 2, 3, 4, 5, 5, 6, 7],
            "destinations": [1, 2, 3, 3, 3, 4, 1, 1, 6, 7, 2, 3],
            "edge_id": None,
            "edge_type": None,
            "weight": None,
            "hop_id": [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
        }
    )
    assert len(results) == 12
    # Batch 0 covers rows [0, 8); batch 1 covers rows [8, 12).
    offsets = cudf.DataFrame({"offsets": [0, 8, 12], "batch_id": [0, 1, None]})

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_io")
    create_directory_with_overwrite(samples_path)
    write_samples(results, offsets, None, 1, samples_path)
    assert len(os.listdir(samples_path)) == 2

    partitions = [
        (0, "batch=0-0.parquet", 0, 8),
        (1, "batch=1-1.parquet", 8, 12),
    ]
    for batch_id, fname, lo, hi in partitions:
        part = cudf.read_parquet(os.path.join(samples_path, fname))
        assert len(part) == hi - lo
        for col in ("sources", "destinations", "hop_id"):
            assert (
                part[col].values_host.tolist()
                == results[col].iloc[lo:hi].values_host.tolist()
            )
        assert (part.batch_id == batch_id).all()

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_io_empty_batch(scratch_dir):
    """write_samples must tolerate gaps in the batch-id sequence while still
    grouping two batches per output file."""
    sources = [0, 0, 1, 2, 2, 2, 3, 4, 5, 5, 6, 7, 9, 9, 12, 13, 29, 29, 31, 14]
    destinations = [1, 2, 3, 3, 3, 4, 1, 1, 6, 7, 2, 3, 12, 13, 18, 19, 31, 14, 15, 16]
    hops = [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
    results = cudf.DataFrame(
        {
            "sources": sources,
            "destinations": destinations,
            "edge_id": None,
            "edge_type": None,
            "weight": None,
            "hop_id": hops,
        }
    )
    assert len(results) == 20

    # some batches are missing (only ids 0, 3, 4, 10 are present)
    offsets = cudf.DataFrame(
        {"offsets": [0, 8, 12, 16, 20], "batch_id": [0, 3, 4, 10, None]}
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_io_empty_batch")
    create_directory_with_overwrite(samples_path)
    write_samples(results, offsets, None, 2, samples_path)
    assert len(os.listdir(samples_path)) == 2

    for fname, lo, hi in [("batch=0-1.parquet", 0, 1), ("batch=4-5.parquet", 4, 5)]:
        part = cudf.read_parquet(os.path.join(samples_path, fname))
        assert part.batch_id.min() == lo
        assert part.batch_id.max() == hi

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_io_mock_csr(scratch_dir):
    """Round-trip a mock CSR-compressed sampling result (major_offsets/minors
    plus renumber map) through write_samples and parquet."""
    major_offsets_array = cudf.Series([0, 5, 10, 15])
    minors_array = cudf.Series([1, 2, 3, 4, 8, 9, 1, 3, 4, 5, 3, 0, 4, 9, 1])
    edge_ids = cudf.Series(cupy.arange(len(minors_array)))

    # 2 hops
    label_hop_offsets = cudf.Series([0, 1, 3])

    # map
    renumber_map = cudf.Series(cupy.arange(10))
    renumber_map_offsets = cudf.Series([0, 10])

    results_df = cudf.DataFrame()
    results_df["minors"] = minors_array
    results_df["major_offsets"] = major_offsets_array
    results_df["edge_id"] = edge_ids
    results_df["edge_type"] = None
    results_df["weight"] = None

    offsets_df = cudf.DataFrame()
    offsets_df["offsets"] = label_hop_offsets
    offsets_df["renumber_map_offsets"] = renumber_map_offsets
    offsets_df["batch_id"] = cudf.Series([0])

    renumber_df = cudf.DataFrame()
    renumber_df["map"] = renumber_map

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_io_mock_csr")
    create_directory_with_overwrite(samples_path)

    # Single batch, batches_per_partition=1 -> one parquet file.
    write_samples(results_df, offsets_df, renumber_df, 1, samples_path)

    result = cudf.read_parquet(os.path.join(samples_path, "batch=0-0.parquet"))

    # Columns have different lengths; dropna() strips the null padding
    # (presumably added when the writer aligns columns — confirm) before
    # comparing against the original arrays.
    assert (
        result.minors.dropna().values_host.tolist() == minors_array.values_host.tolist()
    )
    assert (
        result.major_offsets.dropna().values_host.tolist()
        == major_offsets_array.values_host.tolist()
    )
    assert result.edge_id.dropna().values_host.tolist() == edge_ids.values_host.tolist()
    assert (
        result.renumber_map_offsets.dropna().values_host.tolist()
        == renumber_map_offsets.values_host.tolist()
    )
    assert result.map.dropna().values_host.tolist() == renumber_map.values_host.tolist()
    assert (
        result.label_hop_offsets.dropna().values_host.tolist()
        == label_hop_offsets.values_host.tolist()
    )

    # Clean up the scratch directory for subsequent tests.
    shutil.rmtree(samples_path)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_uniform_neighbor_sample.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cupy
import cudf
import cugraph
from cugraph import uniform_neighbor_sample
from cugraph.testing import UNDIRECTED_DATASETS
from cugraph.datasets import email_Eu_core, small_tree
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Free memory still referenced by a previous test before each test runs.
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# Graph orientations exercised by the parameterized tests.
IS_DIRECTED = [True, False]

datasets = UNDIRECTED_DATASETS + [email_Eu_core]

# Cartesian product of all test parameters; consumed by the input_combo
# fixture below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    ([False, True], "with_replacement"),
    (["int32", "float32"], "indices_type"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.

    Also builds the cuGraph Graph for the combination and picks a random
    start_list / fanout_vals, storing them under the "Graph", "start_list",
    and "fanout_vals" keys.
    """
    parameters = dict(
        zip(
            ("graph_file", "directed", "with_replacement", "indices_type"),
            request.param,
        )
    )

    indices_type = parameters["indices_type"]

    input_data_path = parameters["graph_file"].get_path()
    print("data path:", input_data_path)
    directed = parameters["directed"]

    # Edge weights are read with the parameterized dtype so both int32 and
    # float32 indices are exercised downstream.
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", indices_type],
    )

    G = cugraph.Graph(directed=directed)
    G.from_cudf_edgelist(df, source="src", destination="dst", edge_attr="value")

    parameters["Graph"] = G

    # sample k vertices from the cuGraph graph
    k = random.randint(1, 3)
    srcs = G.view_edge_list()["src"]
    dsts = G.view_edge_list()["dst"]
    vertices = cudf.concat([srcs, dsts]).drop_duplicates()
    start_list = vertices.sample(k).astype("int32")

    # Generate a random fanout_vals list of length random(1, k)
    fanout_vals = [random.randint(1, k) for _ in range(random.randint(1, k))]

    # These prints are for debugging purposes since the vertices and
    # the fanout_vals are randomly sampled/chosen
    print("\nstart_list: \n", start_list)
    print("fanout_vals: ", fanout_vals)

    parameters["start_list"] = start_list
    parameters["fanout_vals"] = fanout_vals

    return parameters
@pytest.fixture(scope="module")
def simple_unweighted_input_expected_output(request):
    """
    Fixture for providing the input for a uniform_neighbor_sample test using a
    small/simple unweighted graph and the corresponding expected output.
    """
    edges = cudf.DataFrame(
        {"src": [0, 1, 2, 2, 0, 1, 4, 4], "dst": [3, 2, 1, 4, 1, 3, 1, 2]}
    )
    graph = cugraph.Graph()
    graph.from_cudf_edgelist(edges, source="src", destination="dst")

    # Sampling from vertex 0 with unlimited fanout (-1) must return both of
    # its outgoing edges: (0, 3) and (0, 1).
    return {
        "Graph": graph,
        "start_list": cudf.Series([0], dtype="int32"),
        "fanout_vals": [-1],
        "with_replacement": True,
        "expected_src": [0, 0],
        "expected_dst": [3, 1],
    }
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
@pytest.mark.cugraph_ops
def test_uniform_neighbor_sample_simple(input_combo):
    """Every sampled edge must exist in the input edge list, the indices dtype
    must round-trip, and every seed with outgoing edges must appear in the
    sampled vertex set."""

    G = input_combo["Graph"]

    #
    # Make sure the old C++ renumbering was skipped because:
    # 1) Pylibcugraph already does renumbering
    # 2) Uniform neighborhood sampling allows int32 weights
    #    which are not supported by the C++ renumbering
    # This should be 'True' only for string vertices and multi columns vertices
    #
    assert G.renumbered is False

    # Retrieve the input dataframe.
    # FIXME: in simpleGraph and simpleDistributedGraph, G.edgelist.edgelist_df
    # should be 'None' if the datasets was never renumbered
    input_df = G.edgelist.edgelist_df

    result_nbr = uniform_neighbor_sample(
        G,
        input_combo["start_list"],
        input_combo["fanout_vals"],
        with_replacement=input_combo["with_replacement"],
    )
    print(input_df)
    print(result_nbr)

    # multi edges are dropped to easily verify that each edge in the
    # results is present in the input dataframe
    result_nbr = result_nbr.drop_duplicates()

    # FIXME: The indices are not included in the comparison because garbage
    # value are intermittently retuned. This observation is observed
    # when passing float weights
    # Inner join on (src, dst): a row count smaller than result_nbr means
    # some sampled edge is not present in the input.
    join = result_nbr.merge(
        input_df, left_on=[*result_nbr.columns[:2]], right_on=[*input_df.columns[:2]]
    )

    if len(result_nbr) != len(join):
        # Right join to surface the sampled edge(s) with no input match.
        join2 = input_df.merge(
            result_nbr,
            how="right",
            left_on=[*input_df.columns],
            right_on=[*result_nbr.columns],
        )
        # The left part of the datasets shows which edge is missing from the
        # right part where the left and right part are respectively the
        # uniform-neighbor-sample results and the input dataframe.

        difference = (
            join2.sort_values([*result_nbr.columns])
            .to_pandas()
            .query("src.isnull()", engine="python")
        )

        invalid_edge = difference[difference.columns[:3]]
        raise Exception(
            f"\nThe edges below from uniform-neighbor-sample "
            f"are invalid\n {invalid_edge}"
        )

    # Ensure the right indices type is returned
    assert result_nbr["indices"].dtype == input_combo["indices_type"]

    sampled_vertex_result = (
        cudf.concat([result_nbr["sources"], result_nbr["destinations"]])
        .drop_duplicates()
        .reset_index(drop=True)
    )

    sampled_vertex_result = sampled_vertex_result.to_pandas()
    start_list = input_combo["start_list"].to_pandas()

    if not set(start_list).issubset(set(sampled_vertex_result)):
        missing_vertex = set(start_list) - set(sampled_vertex_result)
        missing_vertex = list(missing_vertex)
        # compute the out-degree of the missing vertices
        out_degree = G.out_degree(missing_vertex)
        out_degree = out_degree[out_degree.degree != 0]
        # If the missing vertices have outgoing edges, return an error
        if len(out_degree) != 0:
            missing_vertex = out_degree["vertex"].to_pandas().to_list()
            raise Exception(
                f"vertex {missing_vertex} is missing from "
                f"uniform neighbor sampling results"
            )
@pytest.mark.sg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_uniform_neighbor_sample_tree(directed):
    """Sample the small_tree dataset and validate the result edges, dtypes,
    and seed coverage."""

    input_data_path = small_tree.get_path()

    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    G = cugraph.Graph(directed=directed)
    G.from_cudf_edgelist(df, "src", "dst", "value")

    #
    # Make sure the old C++ renumbering was skipped because:
    # 1) Pylibcugraph already does renumbering
    # 2) Uniform neighborhood sampling allows int32 weights
    #    which are not supported by the C++ renumbering
    # This should be 'True' only for string vertices and multi columns vertices
    #
    assert G.renumbered is False

    # Retrieve the input dataframe.
    # input_df != df if 'directed = False' because df will be symmetrized
    # internally.
    input_df = G.edgelist.edgelist_df

    # TODO: Incomplete, include more testing for tree graph as well as
    # for larger graphs
    start_list = cudf.Series([0, 0], dtype="int32")
    fanout_vals = [4, 1, 3]
    with_replacement = True
    result_nbr = uniform_neighbor_sample(
        G, start_list, fanout_vals, with_replacement=with_replacement
    )

    result_nbr = result_nbr.drop_duplicates()

    # Every sampled (src, dst) pair must exist in the input edge list.
    join = result_nbr.merge(
        input_df, left_on=[*result_nbr.columns[:2]], right_on=[*input_df.columns[:2]]
    )

    assert len(join) == len(result_nbr)
    # Since the validity of results have (probably) been tested at both the C++
    # and C layers, simply test that the python interface and conversions were
    # done correctly.
    assert result_nbr["sources"].dtype == "int32"
    assert result_nbr["destinations"].dtype == "int32"
    assert result_nbr["indices"].dtype == "float32"

    result_nbr_vertices = (
        cudf.concat([result_nbr["sources"], result_nbr["destinations"]])
        .drop_duplicates()
        .reset_index(drop=True)
    )

    # All seeds must be present in the sampled vertex set.
    assert set(start_list.to_pandas()).issubset(set(result_nbr_vertices.to_pandas()))
@pytest.mark.sg
@pytest.mark.cugraph_ops
def test_uniform_neighbor_sample_unweighted(simple_unweighted_input_expected_output):
    """Sampling the small unweighted graph returns exactly the expected edges."""
    test_data = simple_unweighted_input_expected_output

    sampling_results = uniform_neighbor_sample(
        test_data["Graph"],
        test_data["start_list"].astype("int64"),
        test_data["fanout_vals"],
        with_replacement=test_data["with_replacement"],
    )

    # Result ordering is not guaranteed, so compare sorted vertex lists.
    actual_src = sorted(sampling_results.sources.to_arrow().to_pylist())
    assert actual_src == sorted(test_data["expected_src"])

    actual_dst = sorted(sampling_results.destinations.to_arrow().to_pylist())
    assert actual_dst == sorted(test_data["expected_dst"])
@pytest.mark.sg
@pytest.mark.cugraph_ops
@pytest.mark.parametrize("return_offsets", [True, False])
@pytest.mark.parametrize("include_hop_column", [True, False])
def test_uniform_neighbor_sample_edge_properties(return_offsets, include_hop_column):
    """Verify that the edge properties (weight, edge id, edge type) returned
    by uniform_neighbor_sample match the input edge list, and that the
    optional hop column / offsets output are shaped as expected."""
    edgelist_df = cudf.DataFrame(
        {
            "src": cudf.Series([0, 1, 2, 3, 4, 3, 4, 2, 0, 1, 0, 2], dtype="int32"),
            "dst": cudf.Series([1, 2, 4, 2, 3, 4, 1, 1, 2, 3, 4, 4], dtype="int32"),
            "eid": cudf.Series([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], dtype="int32"),
            "etp": cudf.Series([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 0], dtype="int32"),
            "w": [0.0, 0.1, 0.2, 3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11],
        }
    )

    start_df = cudf.DataFrame(
        {
            "seed": cudf.Series([0, 4], dtype="int32"),
            "batch": cudf.Series([0, 1], dtype="int32"),
        }
    )

    G = cugraph.MultiGraph(directed=True)
    G.from_cudf_edgelist(
        edgelist_df,
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )

    sampling_results = uniform_neighbor_sample(
        G,
        start_list=start_df,
        fanout_vals=[2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        return_offsets=return_offsets,
        include_hop_column=include_hop_column,
    )
    if return_offsets:
        sampling_results, sampling_offsets = sampling_results

    # BUGFIX: set_index() returns a new DataFrame; the original code discarded
    # the result, so the .loc[] lookups below were keyed on the default
    # RangeIndex and only passed because eid happens to equal 0..11.  Assign
    # the result so the lookups are genuinely keyed on the edge id.
    edgelist_df = edgelist_df.set_index("eid")

    # Each sampled row's properties must match the input edge with that id.
    assert (
        edgelist_df.loc[sampling_results.edge_id]["w"].values_host.tolist()
        == sampling_results["weight"].values_host.tolist()
    )
    assert (
        edgelist_df.loc[sampling_results.edge_id]["etp"].values_host.tolist()
        == sampling_results["edge_type"].values_host.tolist()
    )
    assert (
        edgelist_df.loc[sampling_results.edge_id]["src"].values_host.tolist()
        == sampling_results["sources"].values_host.tolist()
    )
    assert (
        edgelist_df.loc[sampling_results.edge_id]["dst"].values_host.tolist()
        == sampling_results["destinations"].values_host.tolist()
    )

    if include_hop_column:
        # Per batch: 2 hop-0 rows then 4 hop-1 rows, for 2 batches.
        assert sampling_results["hop_id"].values_host.tolist() == (
            [0, 0, 1, 1, 1, 1] * 2
        )
    else:
        assert "hop_id" not in sampling_results

    if return_offsets:
        assert sampling_offsets["batch_id"].dropna().values_host.tolist() == [0, 1]
        if include_hop_column:
            # Offsets delimit batches only (6 rows per batch).
            assert sampling_offsets["offsets"].dropna().values_host.tolist() == [
                0,
                6,
                12,
            ]
        else:
            # Without the hop column, offsets delimit each (batch, hop) span.
            assert sampling_offsets["offsets"].dropna().values_host.tolist() == [
                0,
                2,
                6,
                8,
                12,
            ]
    else:
        assert sampling_results["batch_id"].values_host.tolist() == ([0] * 6 + [1] * 6)
@pytest.mark.sg
def test_uniform_neighbor_sample_edge_properties_self_loops():
    """Graph of self loops only: sampling must return each seed's self loop
    with weight, edge id, and edge type intact."""
    df = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [0, 1, 2],
            "eid": [2, 4, 6],
            "etp": cudf.Series([1, 1, 2], dtype="int32"),
            "w": [0.0, 0.1, 0.2],
        }
    )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        df,
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        start_list=cudf.DataFrame(
            {
                "start": cudf.Series([0, 1, 2]),
                "batch": cudf.Series([1, 1, 1], dtype="int32"),
            }
        ),
        fanout_vals=[2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        random_state=80,
    )

    # Two hops over three self loops -> each vertex/property appears twice,
    # all in batch 1; three rows per hop (see hop_id assertion below).
    assert sorted(sampling_results.sources.values_host.tolist()) == [0, 0, 1, 1, 2, 2]
    assert sorted(sampling_results.destinations.values_host.tolist()) == [
        0,
        0,
        1,
        1,
        2,
        2,
    ]
    assert sorted(sampling_results.weight.values_host.tolist()) == [
        0.0,
        0.0,
        0.1,
        0.1,
        0.2,
        0.2,
    ]
    assert sorted(sampling_results.edge_id.values_host.tolist()) == [2, 2, 4, 4, 6, 6]
    assert sorted(sampling_results.edge_type.values_host.tolist()) == [1, 1, 1, 1, 2, 2]
    assert sorted(sampling_results.batch_id.values_host.tolist()) == [1, 1, 1, 1, 1, 1]
    assert sorted(sampling_results.hop_id.values_host.tolist()) == [0, 0, 0, 1, 1, 1]
@pytest.mark.sg
def test_uniform_neighbor_sample_hop_id_order():
    """hop_id values in the result must already be in non-decreasing order."""
    df = cudf.DataFrame(
        {
            "src": [0, 1, 2, 3, 3, 6],
            "dst": [2, 3, 4, 5, 6, 7],
        }
    )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst")

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        cudf.Series([0, 1], dtype="int64"),
        fanout_vals=[2, 2, 2],
        with_replacement=False,
        with_edge_properties=True,
    )

    hop_ids = sampling_results.hop_id.values_host.tolist()
    assert hop_ids == sorted(hop_ids)
@pytest.mark.sg
def test_uniform_neighbor_sample_hop_id_order_multi_batch():
    """hop_id must be in non-decreasing order within each batch."""
    df = cudf.DataFrame(
        {
            "src": [0, 1, 2, 3, 3, 6],
            "dst": [2, 3, 4, 5, 6, 7],
        }
    )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst")

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        start_list=cudf.DataFrame(
            {
                "start": cudf.Series([0, 1], dtype="int64"),
                "batch": cudf.Series([0, 1], dtype="int32"),
            }
        ),
        fanout_vals=[2, 2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
    )

    for batch in range(2):
        batch_hop_ids = sampling_results[
            sampling_results.batch_id == batch
        ].hop_id.values_host.tolist()
        assert batch_hop_ids == sorted(batch_hop_ids)
@pytest.mark.sg
def test_uniform_neighbor_sample_empty_start_list():
    """An empty start list must yield an empty result, not an error."""
    df = cudf.DataFrame(
        {
            "src": [0, 1, 2],
            "dst": [0, 1, 2],
            "eid": [2, 4, 6],
            "etp": cudf.Series([1, 1, 2], dtype="int32"),
            "w": [0.0, 0.1, 0.2],
        }
    )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        df,
        source="src",
        destination="dst",
        edge_attr=["w", "eid", "etp"],
    )

    empty_start = cudf.DataFrame(
        {
            "start_list": cudf.Series(dtype="int64"),
            "batch_id_list": cudf.Series(dtype="int32"),
        }
    )

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        start_list=empty_start,
        fanout_vals=[2, 2],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        random_state=32,
    )

    assert sampling_results.empty
@pytest.mark.sg
def test_uniform_neighbor_sample_exclude_sources_basic():
    """prior_sources_behavior='exclude' on a small multigraph: a vertex that
    was a source in an earlier hop must never be a source again."""
    df = cudf.DataFrame(
        {
            "src": [0, 4, 1, 2, 3, 5, 4, 1, 0],
            "dst": [1, 1, 2, 4, 3, 1, 5, 0, 2],
            "eid": [9, 8, 7, 6, 5, 4, 3, 2, 1],
        }
    )

    G = cugraph.MultiGraph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst", edge_id="eid")

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        cudf.DataFrame(
            {
                "seed": cudf.Series([0, 4, 1], dtype="int64"),
                "batch": cudf.Series([1, 1, 1], dtype="int32"),
            }
        ),
        [2, 3, 3],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        random_state=62,
        prior_sources_behavior="exclude",
    ).sort_values(by="hop_id")

    # Expected hop-0 destinations for seeds {0, 4, 1} with this random_state.
    expected_hop_0 = [1, 2, 1, 5, 2, 0]
    assert sorted(
        sampling_results[sampling_results.hop_id == 0].destinations.values_host.tolist()
    ) == sorted(expected_hop_0)

    # The hop-0 seeds must not reappear as sources in later hops.
    next_sources = set(
        sampling_results[sampling_results.hop_id > 0].sources.values_host.tolist()
    )
    for v in [0, 4, 1]:
        assert v not in next_sources

    # Likewise, hop-1 sources must not reappear in hop 2.
    next_sources = set(
        sampling_results[sampling_results.hop_id > 1].sources.values_host.tolist()
    )
    for v in sampling_results[
        sampling_results.hop_id == 1
    ].sources.values_host.tolist():
        assert v not in next_sources
@pytest.mark.sg
def test_uniform_neighbor_sample_exclude_sources_email_eu_core():
    """With prior_sources_behavior='exclude', a vertex used as a source in
    one hop must never reappear as a source in any later hop."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(62, int(0.001 * len(el)))

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        prior_sources_behavior="exclude",
    )

    for hop in range(5):
        hop_sources = set(
            sampling_results[
                sampling_results.hop_id == hop
            ].sources.values_host.tolist()
        )
        later_sources = set(
            sampling_results[sampling_results.hop_id > hop].sources.values_host.tolist()
        )
        # No source from this hop may show up in any subsequent hop.
        assert hop_sources.isdisjoint(later_sources)
@pytest.mark.sg
def test_uniform_neighbor_sample_carry_over_sources_basic():
    """prior_sources_behavior='carryover' on a small multigraph: every source
    of one hop must also be a source of the following hop."""
    df = cudf.DataFrame(
        {
            "src": [0, 4, 1, 2, 3, 5, 4, 1, 0, 6],
            "dst": [1, 1, 2, 4, 6, 1, 5, 0, 2, 2],
            "eid": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
        }
    )

    G = cugraph.MultiGraph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst", edge_id="eid")

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        cudf.DataFrame(
            {
                "seed": cudf.Series([0, 4, 3], dtype="int64"),
                "batch": cudf.Series([1, 1, 1], dtype="int32"),
            }
        ),
        [2, 3, 3],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=True,
        random_state=62,
        prior_sources_behavior="carryover",
    ).sort_values(by="hop_id")[["sources", "destinations", "hop_id"]]

    # NOTE(review): expects vertex 6 to contribute exactly two hop-2 rows —
    # presumably carried over from two hop-1 occurrences; confirm against the
    # carryover semantics of the sampler.
    assert (
        len(
            sampling_results[
                (sampling_results.hop_id == 2) & (sampling_results.sources == 6)
            ]
        )
        == 2
    )

    # Carryover invariant: each hop's source set is contained in the next.
    for hop in range(2):
        sources_current_hop = set(
            sampling_results[
                sampling_results.hop_id == hop
            ].sources.values_host.tolist()
        )
        sources_next_hop = set(
            sampling_results[
                sampling_results.hop_id == (hop + 1)
            ].sources.values_host.tolist()
        )
        for s in sources_current_hop:
            assert s in sources_next_hop
@pytest.mark.sg
def test_uniform_neighbor_sample_carry_over_sources_email_eu_core():
    """With prior_sources_behavior='carryover', each hop's source set must be
    contained in the next hop's source set."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(62, int(0.001 * len(el)))

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        prior_sources_behavior="carryover",
    )

    for hop in range(4):
        current_sources = set(
            sampling_results[
                sampling_results.hop_id == hop
            ].sources.values_host.tolist()
        )
        following_sources = set(
            sampling_results[
                sampling_results.hop_id == (hop + 1)
            ].sources.values_host.tolist()
        )
        assert current_sources.issubset(following_sources)
@pytest.mark.sg
def test_uniform_neighbor_sample_deduplicate_sources_email_eu_core():
    """With deduplicate_sources=True, each source is sampled at most once per
    hop, so it contributes at most fanout-many rows in that hop."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(62, int(0.001 * len(el)))

    sampling_results = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        [5, 4, 3, 2, 1],
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
    )

    for hop in range(5):
        counts_current_hop = (
            sampling_results[sampling_results.hop_id == hop]
            .sources.value_counts()
            .values_host.tolist()
        )
        for c in counts_current_hop:
            # The fanout for hop `hop` is 5 - hop (see the fanout list above),
            # which bounds how often a deduplicated source can appear.
            assert c <= 5 - hop
@pytest.mark.sg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_renumber(hops):
    """renumber=True: for each hop prefix, the front of the renumber map must
    contain exactly the hop-0 sources plus all destinations seen so far."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(62, int(0.0001 * len(el)))

    # Identical parameters (including random_state) so both calls sample the
    # same edges; only the renumbering differs.
    sampling_results_unrenumbered = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=False,
        random_state=62,
    )

    sampling_results_renumbered, renumber_map = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=True,
        random_state=62,
    )

    sources_hop_0 = sampling_results_unrenumbered[
        sampling_results_unrenumbered.hop_id == 0
    ].sources
    for hop in range(len(hops)):
        destinations_hop = sampling_results_unrenumbered[
            sampling_results_unrenumbered.hop_id <= hop
        ].destinations
        expected_renumber_map = cudf.concat([sources_hop_0, destinations_hop]).unique()

        # The map prefix for this hop must hold exactly those vertices
        # (order within the prefix is not specified, so compare sorted).
        assert sorted(expected_renumber_map.values_host.tolist()) == sorted(
            renumber_map.map[0 : len(expected_renumber_map)].values_host.tolist()
        )
    # Single implicit batch -> all map rows belong to batch 0.
    assert (renumber_map.batch_id == 0).all()
@pytest.mark.sg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_offset_renumber(hops):
    """Like test_uniform_neighbor_sample_renumber but with return_offsets=True;
    additionally validates the renumber-map offsets for the single batch."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(62, int(0.0001 * len(el)))

    # Same parameters (including random_state) for both calls so the sampled
    # edges match; only renumbering differs.
    (
        sampling_results_unrenumbered,
        offsets_unrenumbered,
    ) = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=False,
        return_offsets=True,
        random_state=62,
    )

    (
        sampling_results_renumbered,
        offsets_renumbered,
        renumber_map,
    ) = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        renumber=True,
        return_offsets=True,
        random_state=62,
    )

    sources_hop_0 = sampling_results_unrenumbered[
        sampling_results_unrenumbered.hop_id == 0
    ].sources
    for hop in range(len(hops)):
        destinations_hop = sampling_results_unrenumbered[
            sampling_results_unrenumbered.hop_id <= hop
        ].destinations
        # The map prefix for this hop holds the hop-0 sources plus all
        # destinations encountered so far.
        expected_renumber_map = cudf.concat([sources_hop_0, destinations_hop]).unique()

        assert sorted(expected_renumber_map.values_host.tolist()) == sorted(
            renumber_map.map[0 : len(expected_renumber_map)].values_host.tolist()
        )

    # One batch -> offsets are [0, len(map)].
    renumber_map_offsets = offsets_renumbered.renumber_map_offsets.dropna()
    assert len(renumber_map_offsets) == 2
    assert renumber_map_offsets.iloc[0] == 0
    assert renumber_map_offsets.iloc[-1] == len(renumber_map)

    assert len(offsets_renumbered) == 2
@pytest.mark.sg
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
@pytest.mark.parametrize("seed", [62, 66, 68])
def test_uniform_neighbor_sample_csr_csc_global(hops, seed):
    """Globally CSR-compressed output: expanding major_offsets/minors through
    the renumber map must yield only edges of the original graph."""
    el = email_Eu_core.get_edgelist()

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = G.select_random_vertices(seed, int(0.0001 * len(el)))

    sampling_results, offsets, renumber_map = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        # carryover not valid because C++ sorts on (hop,src)
        prior_sources_behavior="exclude",
        renumber=True,
        return_offsets=True,
        random_state=seed,
        use_legacy_names=False,
        compress_per_hop=False,
        compression="CSR",
        include_hop_column=False,
    )

    # Expand CSR to COO: row i covers minors[major_offsets[i]:major_offsets[i+1]].
    major_offsets = sampling_results["major_offsets"].dropna().values
    majors = cudf.Series(cupy.arange(len(major_offsets) - 1))
    majors = majors.repeat(cupy.diff(major_offsets))

    minors = sampling_results["minors"].dropna()
    assert len(majors) == len(minors)

    # Translate renumbered vertex ids back to the original ids.
    majors = renumber_map.map.iloc[majors]
    minors = renumber_map.map.iloc[minors]

    for i in range(len(majors)):
        # Each reconstructed (src, dst) pair must match exactly one input edge.
        assert 1 == len(el[(el.src == majors.iloc[i]) & (el.dst == minors.iloc[i])])
@pytest.mark.sg
@pytest.mark.parametrize("seed", [62, 66, 68])
@pytest.mark.parametrize("hops", [[5], [5, 5], [5, 5, 5]])
def test_uniform_neighbor_sample_csr_csc_local(hops, seed):
    """Per-hop CSR-compressed output: expand each hop's CSR slice and verify
    every reconstructed (major, minor) pair is an edge of the original graph."""
    el = email_Eu_core.get_edgelist(download=True)

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(el, source="src", destination="dst")

    seeds = cudf.Series(
        [49, 71], dtype="int32"
    )  # hardcoded to ensure out-degree is high enough

    sampling_results, offsets, renumber_map = cugraph.uniform_neighbor_sample(
        G,
        seeds,
        hops,
        with_replacement=False,
        with_edge_properties=True,
        with_batch_ids=False,
        deduplicate_sources=True,
        prior_sources_behavior="carryover",
        renumber=True,
        return_offsets=True,
        random_state=seed,
        use_legacy_names=False,
        compress_per_hop=True,
        compression="CSR",
        include_hop_column=False,
    )

    for hop in range(len(hops)):
        # Slice this hop's offsets rows (plus the closing offset), then the
        # minors they span.
        major_offsets = sampling_results["major_offsets"].iloc[
            offsets.offsets.iloc[hop] : (offsets.offsets.iloc[hop + 1] + 1)
        ]
        minors = sampling_results["minors"].iloc[
            major_offsets.iloc[0] : major_offsets.iloc[-1]
        ]

        # Expand CSR rows into a COO majors column.
        majors = cudf.Series(cupy.arange(len(major_offsets) - 1))
        majors = majors.repeat(cupy.diff(major_offsets))

        # Map renumbered ids back to original vertex ids.
        majors = renumber_map.map.iloc[majors]
        minors = renumber_map.map.iloc[minors]

        for i in range(len(majors)):
            assert 1 == len(el[(el.src == majors.iloc[i]) & (el.dst == minors.iloc[i])])
@pytest.mark.sg
@pytest.mark.skip(reason="needs to be written!")
def test_uniform_neighbor_sample_dcsr_dcsc_global():
    # Placeholder: DCSR/DCSC global-compression coverage is not implemented.
    raise NotImplementedError
@pytest.mark.sg
@pytest.mark.skip(reason="needs to be written!")
def test_uniform_neighbor_sample_dcsr_dcsc_local():
    # Placeholder: DCSR/DCSC per-hop-compression coverage is not implemented.
    raise NotImplementedError
@pytest.mark.sg
@pytest.mark.skip(reason="needs to be written!")
def test_multi_client_sampling():
    # See gist for example test to write
    # https://gist.github.com/VibhuJawa/1b705427f7a0c5a2a4f58e0a3e71ef21
    raise NotImplementedError
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_egonet_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing import utils
from cugraph.dask.common.mg_utils import is_single_gpu
from pylibcugraph.testing import gen_fixture_params_product
from cudf.testing.testing import assert_frame_equal, assert_series_equal
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Free memory still referenced by a previous test before each test runs.
    gc.collect()
# Parameter values swept by gen_fixture_params_product below.
IS_DIRECTED = [True, False]
SEEDS = [0, 5, 13, [0, 2]]
RADIUS = [1, 2, 3]


# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]

# Cartesian product of all parameter values; consumed by input_combo.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (SEEDS, "seeds"),
    (RADIUS, "radius"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Return the current combination of params as a dictionary for use in tests
    or other parameterized fixtures.
    """
    param_names = ("graph_file", "directed", "seeds", "radius")
    return {name: value for name, value in zip(param_names, request.param)}
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the egonet algo.
    (based on cuGraph batched_ego_graphs) which can be used for validation.
    """

    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    seeds = input_combo["seeds"]
    radius = input_combo["radius"]

    # Single-GPU reference graph and result.
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=directed, edgevals=True
    )

    sg_cugraph_ego_graphs = cugraph.batched_ego_graphs(G, seeds=seeds, radius=radius)
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    input_combo["sg_cugraph_results"] = sg_cugraph_ego_graphs
    chunksize = dcg.get_chunksize(input_data_path)

    # Multi-GPU graph built from the same dataset for the MG test to consume.
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )

    input_combo["MGGraph"] = dg

    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
def test_dask_mg_ego_graphs(dask_client, benchmark, input_expected_output):
    """Multi-GPU ego_graph must match the single-GPU reference results."""

    dg = input_expected_output["MGGraph"]

    result_ego_graph = benchmark(
        dcg.ego_graph,
        dg,
        input_expected_output["seeds"],
        input_expected_output["radius"],
    )

    # Materialize the distributed results on the host.
    mg_df, mg_offsets = result_ego_graph
    mg_df = mg_df.compute()
    mg_offsets = mg_offsets.compute().reset_index(drop=True)

    sg_df, sg_offsets = input_expected_output["sg_cugraph_results"]

    assert_series_equal(sg_offsets, mg_offsets, check_dtype=False)
    # slice array from offsets, sort the df by src dst and compare
    for i in range(len(sg_offsets) - 1):
        start = sg_offsets[i]
        end = sg_offsets[i + 1]
        mg_df_part = mg_df[start:end].sort_values(["src", "dst"]).reset_index(drop=True)
        sg_df_part = sg_df[start:end].sort_values(["src", "dst"]).reset_index(drop=True)
        assert_frame_equal(mg_df_part, sg_df_part, check_dtype=False, check_like=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_egonet.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
print("Networkx version : {} ".format(nx.__version__))

# Seed vertices and radii used to parametrize the ego-graph tests below.
SEEDS = [0, 5, 13]
RADIUS = [1, 2, 3]
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("radius", RADIUS)
def test_ego_graph_nx(graph_file, seed, radius):
    """cugraph.ego_graph on an Nx graph must be isomorphic to nx.ego_graph."""
    gc.collect()

    edge_df = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        edge_df, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )

    expected = nx.ego_graph(Gnx, seed, radius=radius)
    actual = cugraph.ego_graph(Gnx, seed, radius=radius)
    assert nx.is_isomorphic(expected, actual)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("seeds", [[0, 5, 13]])
@pytest.mark.parametrize("radius", [1, 2, 3])
def test_batched_ego_graphs(graph_file, seeds, radius):
    """Each offset slice of the batched result must match the per-seed Nx ego graph."""
    gc.collect()

    edge_df = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        edge_df, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )

    # cugraph: one call covers all seeds; offsets delimit each seed's slice.
    batched_df, offsets = cugraph.batched_ego_graphs(Gnx, seeds, radius=radius)
    for i, seed in enumerate(seeds):
        expected = nx.ego_graph(Gnx, seed, radius=radius)
        ego_slice = batched_df[offsets[i] : offsets[i + 1]]
        actual = nx.from_pandas_edgelist(
            ego_slice, source="src", target="dst", edge_attr="weight"
        )
        assert nx.is_isomorphic(expected, actual)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("radius", RADIUS)
def test_multi_column_ego_graph(graph_file, seed, radius):
    """Ego graph on a multi-column-vertex graph must contain the single-column result."""
    gc.collect()

    df = utils.read_csv_file(graph_file.get_path(), read_weights_in_sp=True)
    df = df.rename(columns={"0": "src_0", "1": "dst_0"})
    df["src_1"] = df["src_0"] + 1000
    df["dst_1"] = df["dst_0"] + 1000

    # Multi-column graph: each vertex is identified by a (v, v + 1000) pair.
    G1 = cugraph.Graph()
    G1.from_cudf_edgelist(
        df, source=["src_0", "src_1"], destination=["dst_0", "dst_1"], edge_attr="2"
    )
    seed_df = cudf.DataFrame({"v_0": [seed], "v_1": [seed + 1000]})
    ego_cugraph_res = cugraph.ego_graph(G1, seed_df, radius=radius)

    # Reference: same dataset with plain single-column vertices.
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(df, source="src_0", destination="dst_0", edge_attr="2")
    ego_cugraph_exp = cugraph.ego_graph(G2, seed, radius=radius)

    # FIXME: Replace with multi-column view_edge_list()
    edgelist_df = ego_cugraph_res.edgelist.edgelist_df
    edgelist_df_res = ego_cugraph_res.unrenumber(edgelist_df, "src")
    edgelist_df_res = ego_cugraph_res.unrenumber(edgelist_df_res, "dst")

    for i in range(len(edgelist_df_res)):
        src = edgelist_df_res["0_src"].iloc[i]
        dst = edgelist_df_res["0_dst"].iloc[i]
        assert ego_cugraph_exp.has_edge(src, dst)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_bulk_sampler.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cudf
import cupy
import cugraph
from cugraph.datasets import karate, email_Eu_core
from cugraph.experimental.gnn import BulkSampler
from cugraph.utilities.utils import create_directory_with_overwrite
import os
import shutil
import re
@pytest.mark.sg
def test_bulk_sampler_simple(scratch_dir):
    """Basic smoke test: every requested batch id appears in the written samples."""
    edgelist = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    edgelist["eid"] = edgelist["eid"].astype("int32")
    edgelist["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        edgelist,
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_simple")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=2,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
    )

    batches = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 10, 15], dtype="int32"),
            "batch": cudf.Series([0, 0, 1, 1], dtype="int32"),
        }
    )
    sampler.add_batches(batches, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    recovered_samples = cudf.read_parquet(samples_path)
    assert "map" not in recovered_samples.columns

    recovered_batches = recovered_samples["batch_id"].values_host.tolist()
    for b in batches["batch"].unique().values_host.tolist():
        assert b in recovered_batches

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_remainder(scratch_dir):
    """A remainder batch (not filling seeds_per_call) must be written correctly."""
    edgelist = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    edgelist["eid"] = edgelist["eid"].astype("int32")
    edgelist["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        edgelist,
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_remainder")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=2,
        output_path=samples_path,
        graph=G,
        seeds_per_call=7,
        batches_per_partition=2,
        fanout_vals=[2, 2],
        with_replacement=False,
    )

    # Should process batch (0, 1, 2) then (3, 4, 5) then 6
    batches = cudf.DataFrame(
        {
            "start": cudf.Series(
                [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], dtype="int32"
            ),
            "batch": cudf.Series(
                [0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6], dtype="int32"
            ),
        }
    )
    sampler.add_batches(batches, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    recovered_samples = cudf.read_parquet(samples_path)
    assert "map" not in recovered_samples.columns

    recovered_batches = recovered_samples["batch_id"].values_host.tolist()
    for b in batches["batch"].unique().values_host.tolist():
        assert b in recovered_batches

    # Full partitions hold two consecutive batch ids and hop ids 0/1.
    for first in range(0, 6, 2):
        part = cudf.read_parquet(
            os.path.join(samples_path, f"batch={first}-{first + 1}.parquet")
        )
        assert ((part.batch_id == first) | (part.batch_id == (first + 1))).all()
        assert ((part.hop_id == 0) | (part.hop_id == 1)).all()

    # The remainder batch (6) lands in a single-batch partition of its own.
    assert (
        cudf.read_parquet(os.path.join(samples_path, "batch=6-6.parquet")).batch_id == 6
    ).all()

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_large_batch_size(scratch_dir):
    """
    Verifies sampling when batch_size far exceeds the number of seeds:
    every requested batch id must still appear in the written samples.
    """
    el = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    el["eid"] = el["eid"].astype("int32")
    el["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        el,
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_large_batch_size")
    # Use the shared helper (as the other tests in this file do) instead of
    # a hand-rolled exists/rmtree/makedirs sequence.
    create_directory_with_overwrite(samples_path)

    bs = BulkSampler(
        batch_size=5120,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
    )

    batches = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 10, 15], dtype="int32"),
            "batch": cudf.Series([0, 0, 1, 1], dtype="int32"),
        }
    )
    bs.add_batches(batches, start_col_name="start", batch_col_name="batch")
    bs.flush()

    recovered_samples = cudf.read_parquet(samples_path)
    # No renumber map column is expected in the output.
    assert "map" not in recovered_samples.columns

    for b in batches["batch"].unique().values_host.tolist():
        assert b in recovered_samples["batch_id"].values_host.tolist()

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_partitions(scratch_dir):
    """
    Verifies renumbered partitions: for each batch in a partition, the length
    of its slice of the renumbering map equals the number of unique vertices
    in that batch's sampled edges.
    """
    el = karate.get_edgelist().reset_index().rename(columns={"index": "eid"})
    el["eid"] = el["eid"].astype("int32")
    el["etp"] = cupy.int32(0)

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        el,
        source="src",
        destination="dst",
        edge_attr=["wgt", "eid", "etp"],
    )

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_partitions")
    # Use the shared helper (as the other tests in this file do) instead of
    # a hand-rolled exists/rmtree/makedirs sequence.
    create_directory_with_overwrite(samples_path)

    bs = BulkSampler(
        batch_size=3,
        output_path=samples_path,
        graph=G,
        fanout_vals=[2, 2],
        with_replacement=False,
        batches_per_partition=2,
        renumber=True,
    )

    batches = cudf.DataFrame(
        {
            "start": cudf.Series([0, 5, 6, 10, 15, 17, 18, 9, 23], dtype="int32"),
            "batch": cudf.Series([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype="int32"),
        }
    )
    bs.add_batches(batches, start_col_name="start", batch_col_name="batch")
    bs.flush()

    for file in os.listdir(samples_path):
        # Partition filenames encode the batch-id range they contain.
        start_batch_id, end_batch_id = [
            int(x) for x in re.match(r"batch=([0-9]+)-([0-9]+).parquet", file).groups()
        ]

        recovered_samples = cudf.read_parquet(os.path.join(samples_path, file))
        recovered_map = recovered_samples.map
        recovered_samples = recovered_samples.drop("map", axis=1).dropna()

        for current_batch_id in range(start_batch_id, end_batch_id + 1):
            # The head of the map column stores per-batch offsets into the
            # map itself; slice out this batch's renumbering map.
            map_start_ix = recovered_map.iloc[current_batch_id - start_batch_id]
            map_end_ix = recovered_map.iloc[current_batch_id - start_batch_id + 1]
            map_current_batch = recovered_map.iloc[map_start_ix:map_end_ix]

            n_unique = cudf.concat(
                [
                    recovered_samples[
                        recovered_samples.batch_id == current_batch_id
                    ].sources,
                    recovered_samples[
                        recovered_samples.batch_id == current_batch_id
                    ].destinations,
                ]
            ).nunique()

            # Every unique vertex in the batch gets exactly one map entry.
            assert len(map_current_batch) == n_unique

    # Clean up scratch output, consistent with the other tests in this file.
    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_empty_batches(scratch_dir):
    """Batches whose later hops produce no edges must not corrupt the output."""
    edgelist = cudf.DataFrame(
        {
            "src": [0, 0, 1, 2, 3, 4, 5, 6],
            "dst": [3, 2, 0, 7, 8, 9, 1, 2],
        }
    )
    batches = cudf.DataFrame(
        {
            "start": [0, 1, 2, 7, 8, 9, 3, 2, 7],
            "batch": cudf.Series([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype="int32"),
        }
    )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(edgelist, source="src", destination="dst")

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_empty_batches")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=3,
        output_path=samples_path,
        graph=G,
        fanout_vals=[-1, -1],
        with_replacement=False,
        batches_per_partition=6,
        renumber=False,
    )
    sampler.add_batches(batches, start_col_name="start", batch_col_name="batch")
    sampler.flush()

    # All written batches land in a single partition file.
    assert len(os.listdir(samples_path)) == 1
    df = cudf.read_parquet(os.path.join(samples_path, "batch=0-1.parquet"))

    def dests(batch_id, hop_id):
        rows = df[(df.batch_id == batch_id) & (df.hop_id == hop_id)]
        return rows.destinations.sort_values().values_host.tolist()

    assert dests(0, 0) == [0, 2, 3, 7]
    assert dests(0, 1) == [2, 3, 7, 8]
    assert dests(1, 0) == [7, 8]
    # Batch 1's second hop is empty; batch 2 produced nothing at all.
    assert dests(1, 1) == []
    assert df.batch_id.max() == 1

    shutil.rmtree(samples_path)
@pytest.mark.sg
def test_bulk_sampler_csr(scratch_dir):
    """CSR-compressed sampling output must have self-consistent major offsets."""
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(email_Eu_core.get_edgelist(), source="src", destination="dst")

    samples_path = os.path.join(scratch_dir, "test_bulk_sampler_csr")
    create_directory_with_overwrite(samples_path)

    sampler = BulkSampler(
        batch_size=7,
        output_path=samples_path,
        graph=G,
        fanout_vals=[5, 4, 3],
        with_replacement=False,
        batches_per_partition=7,
        renumber=True,
        use_legacy_names=False,
        compression="CSR",
        compress_per_hop=False,
        prior_sources_behavior="exclude",
        include_hop_column=False,
    )

    # 1000 seeds split into batches of 7 -> ceil(1000 / 7) = 143 batch ids.
    seeds = G.select_random_vertices(62, 1000)
    batch_ids = cudf.Series(
        cupy.repeat(cupy.arange(int(1000 / 7) + 1, dtype="int32"), 7)[:1000]
    ).sort_values()
    batch_df = cudf.DataFrame(
        {
            "seed": seeds,
            "batch": batch_ids,
        }
    )

    sampler.add_batches(batch_df, start_col_name="seed", batch_col_name="batch")
    sampler.flush()

    # 143 batches at 7 batches per partition -> 21 parquet files.
    assert len(os.listdir(samples_path)) == 21

    for file in os.listdir(samples_path):
        df = cudf.read_parquet(os.path.join(samples_path, file))
        # The span covered by major_offsets must equal the number of rows.
        assert df.major_offsets.dropna().iloc[-1] - df.major_offsets.iloc[0] == len(df)

    shutil.rmtree(samples_path)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_bulk_sampler_io_mg.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import pytest
import cudf
import dask_cudf
from cugraph.gnn.data_loading.bulk_sampler_io import write_samples
from cugraph.utilities.utils import create_directory_with_overwrite
@pytest.mark.mg
def test_bulk_sampler_io(scratch_dir):
    """write_samples must split the dask_cudf results into one parquet per batch."""
    results = cudf.DataFrame(
        {
            "sources": [0, 0, 1, 2, 2, 2, 3, 4, 5, 5, 6, 7],
            "destinations": [1, 2, 3, 3, 3, 4, 1, 1, 6, 7, 2, 3],
            "edge_id": None,
            "edge_type": None,
            "weight": None,
            "hop_id": [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
        }
    )
    results = dask_cudf.from_cudf(results, npartitions=1).repartition(
        divisions=[0, 8, 11]
    )
    assert len(results) == 12

    offsets = cudf.DataFrame({"offsets": [0, 8, 0, 4], "batch_id": [0, None, 1, None]})
    offsets = dask_cudf.from_cudf(offsets, npartitions=1).repartition(
        divisions=[0, 2, 3]
    )

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_io")
    create_directory_with_overwrite(samples_path)

    write_samples(results, offsets, None, 1, samples_path)
    assert len(os.listdir(samples_path)) == 2

    results = results.compute()

    # One file per batch; each must reproduce its slice of the result rows.
    for batch_id, (lo, hi) in ((0, (0, 8)), (1, (8, 12))):
        df = cudf.read_parquet(
            os.path.join(samples_path, f"batch={batch_id}-{batch_id}.parquet")
        )
        assert len(df) == hi - lo
        for col in ("sources", "destinations", "hop_id"):
            assert (
                df[col].values_host.tolist()
                == results[col].iloc[lo:hi].values_host.tolist()
            )
        assert (df.batch_id == batch_id).all()

    shutil.rmtree(samples_path)
# This test consumes dask_cudf inputs and writes to the mg_ scratch path, and
# its sibling in this MG test module is marked mg — the previous sg marker
# appears to be a copy/paste mistake.
@pytest.mark.mg
def test_bulk_sampler_io_empty_batch(scratch_dir):
    """
    Verifies that write_samples handles offset groups whose batch ids are
    non-contiguous (some batches missing) without mixing partitions.
    """
    # fmt: off
    sources_array = [
        0, 0, 1, 2, 2, 2, 3, 4, 5, 5,
        6, 7, 9, 9, 12, 13, 29, 29, 31, 14,
    ]
    destinations_array = [
        1, 2, 3, 3, 3, 4, 1, 1, 6, 7,
        2, 3, 12, 13, 18, 19, 31, 14, 15, 16,
    ]
    hops_array = [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
    # fmt: on

    results = cudf.DataFrame(
        {
            "sources": sources_array,
            "destinations": destinations_array,
            "edge_id": None,
            "edge_type": None,
            "weight": None,
            "hop_id": hops_array,
        }
    )
    results = dask_cudf.from_cudf(results, npartitions=1).repartition(
        divisions=[0, 12, 19]
    )

    # some batches are missing
    offsets = cudf.DataFrame(
        {"offsets": [0, 8, 12, 0, 4, 8], "batch_id": [0, 3, None, 4, 10, None]}
    )
    offsets = dask_cudf.from_cudf(offsets, npartitions=1).repartition(
        divisions=[0, 3, 5]
    )

    samples_path = os.path.join(scratch_dir, "mg_test_bulk_sampler_io_empty_batch")
    create_directory_with_overwrite(samples_path)

    write_samples(results, offsets, None, 2, samples_path)

    files = os.listdir(samples_path)
    assert len(files) == 2

    # NOTE(review): the partition filenames and the asserted min/max suggest
    # batch ids are re-labeled consecutively within each partition — confirm
    # against write_samples.
    df0 = cudf.read_parquet(os.path.join(samples_path, "batch=0-1.parquet"))
    assert df0.batch_id.min() == 0
    assert df0.batch_id.max() == 1

    df1 = cudf.read_parquet(os.path.join(samples_path, "batch=4-5.parquet"))
    assert df1.batch_id.min() == 4
    assert df1.batch_id.max() == 5

    # Clean up scratch output, consistent with the sibling test above.
    shutil.rmtree(samples_path)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/sampling/test_node2vec.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cudf
import cugraph
from cugraph.datasets import small_line, karate
from cugraph.testing import utils, SMALL_DATASETS
# =============================================================================
# Parameters
# =============================================================================
# Graph directedness and path-compression options exercised by the tests.
DIRECTED_GRAPH_OPTIONS = [False, True]
COMPRESSED = [False, True]
# Dataset aliases used in the parametrized tests below.
LINE = small_line
KARATE = karate
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # pytest calls this before every test in this module; force a
    # garbage-collection pass so objects left over from a previous test do
    # not linger into the next one.
    gc.collect()
def _get_param_args(param_name, param_values):
    """
    Returns a tuple of (<param_name>, <pytest.param list>) which can be applied
    as the args to pytest.mark.parametrize(). The pytest.param list also
    contains param id string formed from the param name and values.
    """
    params = [pytest.param(val, id=f"{param_name}={val}") for val in param_values]
    return (param_name, params)
def calc_node2vec(G, start_vertices, max_depth, compress_result, p=1.0, q=1.0):
    """
    Compute node2vec for each of the nodes in 'start_vertices' and return
    ((vertex_paths, edge_weights, vertex_path_sizes), start_vertices).

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
    start_vertices : int or list or cudf.Series
    max_depth : int
    compress_result : bool
    p : float
    q : float
    """
    assert G is not None
    result = cugraph.node2vec(G, start_vertices, max_depth, compress_result, p, q)
    return result, start_vertices
@pytest.mark.sg
@pytest.mark.parametrize(*_get_param_args("graph_file", [KARATE]))
def test_node2vec_invalid(graph_file):
    """Invalid max_depth / p / q / start-vertex inputs must raise ValueError."""
    G = graph_file.get_graph(download=True, create_using=cugraph.Graph(directed=True))
    k = random.randint(1, 10)
    start_vertices = cudf.Series(
        random.sample(range(G.number_of_vertices()), k), dtype="int32"
    )
    compress = True
    defaults = {"max_depth": 1, "p": 1, "q": 1}

    # Build (kwarg -> bad value) override cases for each validated argument.
    bad_cases = [{"max_depth": bad} for bad in [None, -1, "1", 4.5]]
    for bad in [None, -1, "1"]:
        bad_cases.append({"p": bad})
    for bad in [None, -1, "1"]:
        bad_cases.append({"q": bad})

    for overrides in bad_cases:
        kwargs = dict(defaults)
        kwargs.update(overrides)
        with pytest.raises(ValueError):
            calc_node2vec(
                G,
                start_vertices,
                compress_result=compress,
                **kwargs,
            )

    # Tests for invalid start_vertices dtypes, modify when more types are
    # supported
    for bad_start in [1.0, "1", 2147483648]:
        with pytest.raises(ValueError):
            calc_node2vec(
                G,
                bad_start,
                max_depth=defaults["max_depth"],
                compress_result=compress,
                p=defaults["p"],
                q=defaults["q"],
            )
@pytest.mark.sg
@pytest.mark.parametrize(*_get_param_args("graph_file", [LINE]))
@pytest.mark.parametrize(*_get_param_args("directed", DIRECTED_GRAPH_OPTIONS))
def test_node2vec_line(graph_file, directed):
    """Smoke test: node2vec on the small line graph must run without error."""
    G = graph_file.get_graph(
        download=True, create_using=cugraph.Graph(directed=directed)
    )
    seeds = cudf.Series([0, 3, 6], dtype="int32")
    calc_node2vec(G, seeds, max_depth=3, compress_result=True, p=0.8, q=0.5)
@pytest.mark.sg
@pytest.mark.parametrize(*_get_param_args("graph_file", SMALL_DATASETS))
@pytest.mark.parametrize(*_get_param_args("directed", DIRECTED_GRAPH_OPTIONS))
@pytest.mark.parametrize(*_get_param_args("compress", COMPRESSED))
def test_node2vec(
    graph_file,
    directed,
    compress,
):
    """
    Runs node2vec from random seed vertices and validates the returned paths:
    output sizes, that every consecutive vertex pair in a path is an actual
    edge of the graph, and that the reported edge weight matches the edgelist.
    """
    dataset_path = graph_file.get_path()
    cu_M = utils.read_csv_file(dataset_path)
    G = cugraph.Graph(directed=directed)
    G.from_cudf_edgelist(
        cu_M, source="0", destination="1", edge_attr="2", renumber=False
    )
    num_verts = G.number_of_vertices()
    # Random number of seeds in [6, 12]; non-deterministic on purpose.
    k = random.randint(6, 12)
    start_vertices = cudf.Series(random.sample(range(num_verts), k), dtype="int32")
    max_depth = 5
    result, seeds = calc_node2vec(
        G, start_vertices, max_depth, compress_result=compress, p=0.8, q=0.5
    )
    vertex_paths, edge_weights, vertex_path_sizes = result
    if compress:
        # Paths are coalesced, meaning vertex_path_sizes is nonempty. It's
        # necessary to use in order to track starts of paths
        assert vertex_paths.size == vertex_path_sizes.sum()
        if directed:
            # directed graphs may be coalesced at any point
            assert vertex_paths.size - k == edge_weights.size
            # This part is for checking to make sure each of the edges
            # in all of the paths are valid and are accurate
            idx = 0
            for path_idx in range(vertex_path_sizes.size):
                # Each path of length L contributes L - 1 edges/weights.
                for _ in range(vertex_path_sizes[path_idx] - 1):
                    weight = edge_weights[idx]
                    # path_idx compensates for the one-vertex overlap between
                    # the flat weight index and the flat vertex index.
                    u = vertex_paths[idx + path_idx]
                    v = vertex_paths[idx + path_idx + 1]
                    # Corresponding weight to edge is not correct
                    expr = "(src == {} and dst == {})".format(u, v)
                    edge_query = G.edgelist.edgelist_df.query(expr)
                    if edge_query.empty:
                        raise ValueError("edge_query didn't find:({},{})".format(u, v))
                    else:
                        if edge_query["weights"].values[0] != weight:
                            raise ValueError("edge_query weight incorrect")
                    idx += 1
        else:
            # undirected graphs should never be coalesced
            assert vertex_paths.size == max_depth * k
            assert edge_weights.size == (max_depth - 1) * k
            # This part is for checking to make sure each of the edges
            # in all of the paths are valid and are accurate
            for path_idx in range(k):
                for idx in range(max_depth - 1):
                    # Fixed-size layout: path i occupies vertex slots
                    # [i * max_depth, (i + 1) * max_depth).
                    weight = edge_weights[path_idx * (max_depth - 1) + idx]
                    u = vertex_paths[path_idx * max_depth + idx]
                    v = vertex_paths[path_idx * max_depth + idx + 1]
                    # Corresponding weight to edge is not correct
                    expr = "(src == {} and dst == {})".format(u, v)
                    edge_query = G.edgelist.edgelist_df.query(expr)
                    if edge_query.empty:
                        raise ValueError("edge_query didn't find:({},{})".format(u, v))
                    else:
                        if edge_query["weights"].values[0] != weight:
                            raise ValueError("edge_query weight incorrect")
    else:
        # Paths are padded, meaning a formula can be used to track starts of
        # paths. Check that output sizes are as expected
        assert vertex_paths.size == max_depth * k
        assert edge_weights.size == (max_depth - 1) * k
        assert vertex_path_sizes.size == 0
        if directed:
            blanks = vertex_paths.isna()
        # This part is for checking to make sure each of the edges
        # in all of the paths are valid and are accurate
        for i in range(k):
            path_at_end, j = False, 0
            # NOTE(review): weight_idx resets to 0 for every path, so every
            # path reads weights from the start of edge_weights rather than
            # from its own (max_depth - 1) slice — confirm this is intended.
            weight_idx = 0
            while not path_at_end:
                src_idx = i * max_depth + j
                dst_idx = i * max_depth + j + 1
                if directed:
                    # Padded directed paths may contain nulls after early
                    # termination; stop validating at the first invalid slot.
                    invalid_src = blanks[src_idx] or (src_idx >= num_verts)
                    invalid_dst = blanks[dst_idx] or (dst_idx >= num_verts)
                    if invalid_src or invalid_dst:
                        break
                weight = edge_weights[weight_idx]
                u = vertex_paths[src_idx]
                v = vertex_paths[dst_idx]
                # Corresponding weight to edge is not correct
                expr = "(src == {} and dst == {})".format(u, v)
                edge_query = G.edgelist.edgelist_df.query(expr)
                if edge_query.empty:
                    raise ValueError("edge_query didn't find:({},{})".format(u, v))
                else:
                    if edge_query["weights"].values[0] != weight:
                        raise ValueError("edge_query weight incorrect")

                # Only increment if the current indices are valid
                j += 1
                weight_idx += 1
                if j >= max_depth - 1:
                    path_at_end = True
            # Check that path sizes matches up correctly with paths
            if vertex_paths[i * max_depth] != seeds[i]:
                raise ValueError(
                    "vertex_path start did not match seed \
                    vertex:{}".format(
                        vertex_paths.values
                    )
                )
@pytest.mark.sg
@pytest.mark.parametrize(*_get_param_args("graph_file", [LINE]))
@pytest.mark.parametrize(*_get_param_args("renumber", [True, False]))
def test_node2vec_renumber_cudf(graph_file, renumber):
    """Padded node2vec paths must start at their seed, renumbered or not."""
    edge_df = cudf.read_csv(
        graph_file.get_path(),
        delimiter=" ",
        dtype=["int32", "int32", "float32"],
        header=None,
    )
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        edge_df, source="0", destination="1", edge_attr="2", renumber=renumber
    )

    start_vertices = cudf.Series([8, 0, 7, 1, 6, 2], dtype="int32")
    num_seeds = 6
    max_depth = 4

    result, seeds = calc_node2vec(
        G, start_vertices, max_depth, compress_result=False, p=0.8, q=0.5
    )
    vertex_paths, edge_weights, vertex_path_sizes = result

    # compress_result=False pads each path to a fixed max_depth-slot stride.
    for i in range(num_seeds):
        if vertex_paths[i * max_depth] != seeds[i]:
            raise ValueError(
                "vertex_path {} start did not match seed \
                vertex".format(
                    vertex_paths.values
                )
            )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/generators/test_rmat.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import cudf
import cugraph
from cugraph.generators import rmat
from cupy.sparse import coo_matrix, triu, tril
import numpy as np
import cupy as cp
##############################################################################
# Parameter value lists, each paired with matching pytest id strings, used to
# parametrize the RMAT tests below.
_scale_values = [2, 4, 16]
_scale_test_ids = [f"scale={x}" for x in _scale_values]
# create_using options; `int` is deliberately invalid to exercise TypeError.
_graph_types = [cugraph.Graph, None, int]
_graph_test_ids = [f"create_using={getattr(x,'__name__',str(x))}" for x in _graph_types]
_clip_and_flip = [False, True]
_clip_and_flip_test_ids = [f"clip_and_flip={x}" for x in _clip_and_flip]
_scramble_vertex_ids = [False, True]
_scramble_vertex_ids_test_ids = [
    f"scramble_vertex_ids={x}" for x in _scramble_vertex_ids
]
_include_edge_weights = [False, True]
_include_edge_weights_test_ids = [
    f"include_edge_weights={x}" for x in _include_edge_weights
]
# dtype candidates include invalid entries (None, "FLOAT64") to exercise
# weight-dtype validation in test_rmat_edge_weights.
_dtype = [np.float32, cp.float32, None, "FLOAT64", "float32"]
_dtype_test_ids = [f"_dtype={x}" for x in _dtype]
# [None, None] entries exercise the missing-bound error paths.
_min_max_weight_values = [[None, None], [0, 1], [2, 5]]
_min_max_weight_values_test_ids = [
    f"min_max_weight_values={x}" for x in _min_max_weight_values
]
_include_edge_ids = [False, True]
_include_edge_ids_test_ids = [f"include_edge_ids={x}" for x in _include_edge_ids]
_include_edge_types = [False, True]
_include_edge_types_test_ids = [f"include_edge_types={x}" for x in _include_edge_types]
_min_max_edge_type_values = [[None, None], [0, 1], [2, 5]]
_min_max_edge_type_values_test_ids = [
    f"min_max_edge_type_values={x}" for x in _min_max_edge_type_values
]
def _call_rmat(
    scale,
    num_edges,
    create_using,
    clip_and_flip=False,
    scramble_vertex_ids=False,
    include_edge_weights=False,
    dtype=None,
    minimum_weight=None,
    maximum_weight=None,
    include_edge_ids=False,
    include_edge_types=False,
    min_edge_type_value=None,
    max_edge_type_value=None,
    mg=False,
):
    """
    Simplifies calling RMAT by requiring only specific args that are varied by
    these tests and hard-coding all others.
    """
    # a/b/c partition probabilities and the seed follow Graph500.
    fixed_kwargs = {"a": 0.57, "b": 0.19, "c": 0.19, "seed": 24}
    return rmat(
        scale=scale,
        num_edges=num_edges,
        clip_and_flip=clip_and_flip,
        scramble_vertex_ids=scramble_vertex_ids,
        create_using=create_using,
        include_edge_weights=include_edge_weights,
        minimum_weight=minimum_weight,
        maximum_weight=maximum_weight,
        dtype=dtype,
        include_edge_ids=include_edge_ids,
        include_edge_types=include_edge_types,
        min_edge_type_value=min_edge_type_value,
        max_edge_type_value=max_edge_type_value,
        mg=mg,
        **fixed_kwargs,
    )
###############################################################################
@pytest.mark.sg
@pytest.mark.parametrize(
    "include_edge_weights", _include_edge_weights, ids=_include_edge_weights_test_ids
)
@pytest.mark.parametrize("dtype", _dtype, ids=_dtype_test_ids)
@pytest.mark.parametrize(
    "min_max_weight", _min_max_weight_values, ids=_min_max_weight_values_test_ids
)
@pytest.mark.parametrize(
    "scramble_vertex_ids", _scramble_vertex_ids, ids=_scramble_vertex_ids_test_ids
)
def test_rmat_edge_weights(
    include_edge_weights, dtype, min_max_weight, scramble_vertex_ids
):
    """
    Verifies that the edge weights returned by rmat() are valid. Also verifies that
    valid values are passed to 'dtype', 'minimum_weight' and 'maximum_weight'.
    """
    scale = 2
    num_edges = (2**scale) * 4
    create_using = None  # Returns the edgelist from RMAT
    minimum_weight, maximum_weight = min_max_weight
    if include_edge_weights:
        # Weight generation requires both bounds and a float dtype; any other
        # combination must be rejected with ValueError.
        if (
            minimum_weight is None
            or maximum_weight is None
            or dtype
            not in [
                np.float32,
                np.float64,
                cp.float32,
                cp.float64,
                "float32",
                "float64",
            ]
        ):
            with pytest.raises(ValueError):
                _call_rmat(
                    scale,
                    num_edges,
                    create_using,
                    scramble_vertex_ids=scramble_vertex_ids,
                    include_edge_weights=include_edge_weights,
                    dtype=dtype,
                    minimum_weight=minimum_weight,
                    maximum_weight=maximum_weight,
                )
        else:
            df = _call_rmat(
                scale,
                num_edges,
                create_using,
                scramble_vertex_ids=scramble_vertex_ids,
                include_edge_weights=include_edge_weights,
                dtype=dtype,
                minimum_weight=minimum_weight,
                maximum_weight=maximum_weight,
            )
            # Check that there is a 'weights' column
            assert "weights" in df.columns
            # NOTE(review): these queries flag weights that come within 1e-4 of
            # either bound (not only weights strictly outside
            # [minimum_weight, maximum_weight]) — confirm the intended
            # tolerance direction.
            edge_weights_err1 = df.query("{} - weights < 0.0001".format(maximum_weight))
            edge_weights_err2 = df.query(
                "{} - weights > -0.0001".format(minimum_weight)
            )
            # Check that edge weights values are between 'minimum_weight'
            # and 'maximum_weight.
            assert len(edge_weights_err1) == 0
            assert len(edge_weights_err2) == 0
    else:
        df = _call_rmat(
            scale,
            num_edges,
            create_using,
            scramble_vertex_ids=scramble_vertex_ids,
            include_edge_weights=include_edge_weights,
            dtype=dtype,
            minimum_weight=minimum_weight,
            maximum_weight=maximum_weight,
        )
        # Without weights the edgelist contains only the two vertex columns.
        assert len(df.columns) == 2
@pytest.mark.sg
@pytest.mark.parametrize("scale", _scale_values, ids=_scale_test_ids)
@pytest.mark.parametrize(
    "include_edge_ids", _include_edge_ids, ids=_include_edge_ids_test_ids
)
@pytest.mark.parametrize(
    "scramble_vertex_ids", _scramble_vertex_ids, ids=_scramble_vertex_ids_test_ids
)
def test_rmat_edge_ids(scale, include_edge_ids, scramble_vertex_ids):
    """
    Verifies that the edge ids returned by rmat() are valid.
    """
    num_edges = (2**scale) * 4
    create_using = None  # Returns the edgelist from RMAT

    df = _call_rmat(
        scale,
        num_edges,
        create_using,
        scramble_vertex_ids=scramble_vertex_ids,
        include_edge_ids=include_edge_ids,
    )
    if not include_edge_ids:
        # Without ids the edgelist contains only the two vertex columns.
        assert len(df.columns) == 2
    else:
        # Edge ids must be present and equal to the row position.
        assert "edge_id" in df.columns
        df["index"] = df.index
        assert len(df.query("index != edge_id")) == 0
@pytest.mark.sg
@pytest.mark.parametrize(
    "include_edge_types",
    _include_edge_types,
    ids=_include_edge_types_test_ids,
)
@pytest.mark.parametrize(
    "min_max_edge_type_value",
    _min_max_edge_type_values,
    ids=_min_max_edge_type_values_test_ids,
)
@pytest.mark.parametrize(
    "scramble_vertex_ids", _scramble_vertex_ids, ids=_scramble_vertex_ids_test_ids
)
def test_rmat_edge_types(
    include_edge_types, min_max_edge_type_value, scramble_vertex_ids
):
    """
    Verifies that the edge types returned by rmat() are valid and that valid values
    are passed for 'min_edge_type_value' and 'max_edge_type_value'.
    """
    scale = 2
    num_edges = (2**scale) * 4
    create_using = None  # Returns the edgelist from RMAT
    min_edge_type_value, max_edge_type_value = min_max_edge_type_value

    def run_rmat():
        return _call_rmat(
            scale,
            num_edges,
            create_using,
            scramble_vertex_ids=scramble_vertex_ids,
            include_edge_types=include_edge_types,
            min_edge_type_value=min_edge_type_value,
            max_edge_type_value=max_edge_type_value,
        )

    missing_bounds = min_edge_type_value is None or max_edge_type_value is None
    if include_edge_types and missing_bounds:
        # Edge-type generation requires both bounds.
        with pytest.raises(ValueError):
            run_rmat()
    elif include_edge_types:
        df = run_rmat()
        assert "edge_type" in df.columns
        # No edge type may fall outside
        # [min_edge_type_value, max_edge_type_value].
        assert len(df.query("{} < edge_type".format(max_edge_type_value))) == 0
        assert len(df.query("{} > edge_type".format(min_edge_type_value))) == 0
    else:
        df = run_rmat()
        # Without edge types the edgelist contains only the two vertex columns.
        assert len(df.columns) == 2
@pytest.mark.sg
@pytest.mark.parametrize("scale", [2, 4, 8], ids=_scale_test_ids)
@pytest.mark.parametrize(
    "include_edge_weights", _include_edge_weights, ids=_include_edge_weights_test_ids
)
@pytest.mark.parametrize("clip_and_flip", _clip_and_flip, ids=_clip_and_flip_test_ids)
def test_rmat_clip_and_flip(scale, include_edge_weights, clip_and_flip):
    """
    Verifies that there are edges only in the lower triangular part of
    the adjacency matrix when 'clip_and_flip' is set to 'true'.
    Note: 'scramble_vertex_ids' nullifies the effect of 'clip_and_flip' therefore
    both flags should not be set to 'True' in order to test the former
    """
    num_edges = (2**scale) * 4
    create_using = None  # Returns the edgelist from RMAT
    minimum_weight = 0
    maximum_weight = 1
    dtype = np.float32
    # scramble_vertex_ids is forced to False here so clip_and_flip's effect
    # is observable (see docstring note above).
    df = _call_rmat(
        scale,
        num_edges,
        create_using,
        clip_and_flip=clip_and_flip,
        scramble_vertex_ids=False,
        include_edge_weights=include_edge_weights,
        dtype=dtype,
        minimum_weight=minimum_weight,
        maximum_weight=maximum_weight,
    )
    if not include_edge_weights:
        # No weights requested: fabricate unit weights so the sparse-matrix
        # construction below works either way.
        df["weights"] = 1
    # cupy coo_matrix only support 'float32', 'float64', 'complex64'
    # and 'complex128'.
    df["weights"] = df["weights"].astype("float32")
    # Matrix dimension = largest vertex id + 1
    dim = df[["src", "dst"]].max().max() + 1
    src = df["src"].to_cupy()
    dst = df["dst"].to_cupy()
    weights = df["weights"].to_cupy()
    adj_matrix = coo_matrix((weights, (src, dst)), shape=(dim, dim)).toarray()
    # triu keeps the upper triangle (incl. diagonal); tril of that result
    # leaves only the diagonal entries.
    # NOTE(review): coo_matrix/triu/tril are imported elsewhere in this file —
    # presumably from cupyx.scipy.sparse given the .to_cupy() inputs; confirm.
    upper_coo = triu(adj_matrix)
    diag = tril(upper_coo)
    if clip_and_flip:
        # Except the diagonal, There should be no edge in the upper triangular part of
        # the graph adjacency matrix.
        assert diag.nnz == upper_coo.nnz
@pytest.mark.sg
@pytest.mark.parametrize("graph_type", _graph_types, ids=_graph_test_ids)
def test_rmat_return_type(graph_type):
    """
    Verifies that the return type returned by rmat() is valid (or the proper
    exception is raised) based on the 'create_using' input.
    """
    scale = 2
    num_edges = (2**scale) * 4

    if graph_type not in [cugraph.Graph, None]:
        # Unsupported create_using values must be rejected with TypeError.
        with pytest.raises(TypeError):
            _call_rmat(scale, num_edges, graph_type)
        return

    result = _call_rmat(scale, num_edges, graph_type)
    # create_using=None yields the raw edgelist; otherwise the graph type.
    expected_type = cudf.DataFrame if graph_type is None else graph_type
    assert type(result) is expected_type
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/generators/test_rmat_mg.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import dask_cudf
import cugraph
from cugraph.generators import rmat
from cugraph.testing.mg_utils import (
start_dask_client,
stop_dask_client,
)
from cugraph.dask.common.mg_utils import (
is_single_gpu,
)
##############################################################################
# Module-level Dask state, populated by setup_module()/teardown_module()
_cluster = None
_client = None
_is_single_gpu = is_single_gpu()
_visible_devices = None

# Test parameters with human-readable pytest ids
_scale_values = [2, 4, 16]
_scale_test_ids = [f"scale={x}" for x in _scale_values]
# int is included as an intentionally-unsupported create_using value
_graph_types = [cugraph.Graph, None, int]
_graph_test_ids = [f"create_using={getattr(x,'__name__',str(x))}" for x in _graph_types]
def _call_rmat(scale, num_edges, create_using, mg=True):
    """
    Simplifies calling RMAT by requiring only specific args that are varied by
    these tests and hard-coding all others.
    """
    rmat_kwargs = {
        "scale": scale,
        "num_edges": num_edges,
        "a": 0.57,  # from Graph500
        "b": 0.19,  # from Graph500
        "c": 0.19,  # from Graph500
        "seed": 24,
        "clip_and_flip": False,
        "scramble_vertex_ids": True,
        "create_using": create_using,
        "mg": mg,
    }
    return rmat(**rmat_kwargs)
###############################################################################
def setup_module():
    """Start a module-scoped Dask client/cluster unless on a single-GPU box."""
    global _cluster
    global _client
    global _visible_devices
    if not _is_single_gpu:
        (_client, _cluster) = start_dask_client()
        # Mapping of worker address -> info; len() gives the worker count
        _visible_devices = _client.scheduler_info()["workers"]
def teardown_module():
    """Shut down the Dask client/cluster started by setup_module()."""
    if not _is_single_gpu:
        stop_dask_client(_client, _cluster)
###############################################################################
@pytest.mark.mg
@pytest.mark.filterwarnings("ignore:make_current is deprecated:DeprecationWarning")
@pytest.mark.parametrize("scale", _scale_values, ids=_scale_test_ids)
def test_rmat_edgelist(scale):
    """
    Verifies that the edgelist returned by rmat() is valid based on inputs.
    """
    if _is_single_gpu:
        pytest.skip("skipping MG testing on Single GPU system")

    edge_count = (2**scale) * 4
    # create_using=None makes rmat() return the raw edgelist
    edgelist = _call_rmat(scale, edge_count, None)
    # The distributed edgelist should have one partition per Dask worker
    assert edgelist.npartitions == len(_visible_devices)
    assert len(edgelist.compute()) == edge_count
@pytest.mark.mg
@pytest.mark.filterwarnings("ignore:make_current is deprecated:DeprecationWarning")
@pytest.mark.parametrize("graph_type", _graph_types, ids=_graph_test_ids)
def test_rmat_return_type(graph_type):
    """
    Verifies that the return type returned by rmat() is valid (or the proper
    exception is raised) based on the 'create_using' input.
    """
    if _is_single_gpu:
        pytest.skip("skipping MG testing on Single GPU system")

    scale = 2
    num_edges = (2**scale) * 4

    if graph_type not in [cugraph.Graph, None]:
        # Unsupported create_using values must be rejected with TypeError.
        with pytest.raises(TypeError):
            _call_rmat(scale, num_edges, graph_type)
        return

    result = _call_rmat(scale, num_edges, graph_type)
    # create_using=None yields a distributed edgelist; otherwise the graph type.
    expected_type = dask_cudf.DataFrame if graph_type is None else graph_type
    assert type(result) is expected_type
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/tree/test_minimum_spanning_tree.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import gc
import rmm
import cudf
import pytest
import numpy as np
import networkx as nx
import cugraph
from cugraph.testing import utils
from cugraph.datasets import netscience
# Logged once at import time; useful when triaging CI failures
print("Networkx version : {} ".format(nx.__version__))

# Weighted, undirected input datasets exercised by the tests below
UNDIRECTED_WEIGHTED_DATASET = [netscience]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()
def _get_param_args(param_name, param_values):
"""
Returns a tuple of (<param_name>, <pytest.param list>) which can be applied
as the args to pytest.mark.parametrize(). The pytest.param list also
contains param id string formed from the param name and values.
"""
return (param_name, [pytest.param(v, id=f"{param_name}={v}") for v in param_values])
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_WEIGHTED_DATASET)
def test_minimum_spanning_tree_nx(graph_file):
    """Compare cugraph.minimum_spanning_tree() against networkx on one dataset."""
    # cugraph
    G = graph_file.get_graph()
    G.edgelist.edgelist_df["weights"] = G.edgelist.edgelist_df["weights"].astype(
        "float64"
    )
    # Materialize the adjacency list up front so the timing covers only MST
    G.view_adj_list()

    start = time.time()
    cugraph_mst = cugraph.minimum_spanning_tree(G)
    print("CuGraph time : " + str(time.time() - start))

    # Nx
    df = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        df, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )
    start = time.time()
    mst_nx = nx.minimum_spanning_tree(Gnx)
    print("Nx Time : " + str(time.time() - start))

    utils.compare_mst(cugraph_mst, mst_nx)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_WEIGHTED_DATASET)
@pytest.mark.parametrize(*_get_param_args("use_adjlist", [True, False]))
def test_minimum_spanning_tree_graph_repr_compat(graph_file, use_adjlist):
    """MST must run whether or not the adjacency list was materialized first."""
    G = graph_file.get_graph()
    # read_weights_in_sp=False => value column dtype is float64
    weights = G.edgelist.edgelist_df["weights"]
    G.edgelist.edgelist_df["weights"] = weights.astype("float64")
    if use_adjlist:
        G.view_adj_list()
    cugraph.minimum_spanning_tree(G)
# Edge counts for the large randomized benchmark below (which is skipped by
# default via pytest.mark.skip)
DATASETS_SIZES = [
    100000,
    1000000,
    10000000,
    100000000,
]
@pytest.mark.sg
@pytest.mark.skip(reason="Skipping large tests")
@pytest.mark.parametrize("graph_size", DATASETS_SIZES)
def test_random_minimum_spanning_tree_nx(graph_size):
    """Benchmark cugraph vs networkx MST on a large random edge list."""
    # Managed memory allows graphs larger than device memory to page in/out
    rmm.reinitialize(managed_memory=True)
    df = utils.random_edgelist(
        e=graph_size,
        ef=16,
        dtypes={"src": np.int32, "dst": np.int32, "weight": float},
        drop_duplicates=True,
        seed=123456,
    )

    # cugraph
    G = cugraph.Graph()
    G.from_cudf_edgelist(
        cudf.from_pandas(df), source="src", destination="dst", edge_attr="weight"
    )
    # Just for getting relevant timing
    G.view_adj_list()
    t_start = time.time()
    cugraph.minimum_spanning_tree(G)
    cugraph_time = time.time() - t_start
    print("CuGraph time : " + str(cugraph_time))

    # Nx
    Gnx = nx.from_pandas_edgelist(
        df,
        create_using=nx.Graph(),
        source="src",
        target="dst",
        edge_attr="weight",
    )
    t_start = time.time()
    nx.minimum_spanning_tree(Gnx)
    nx_time = time.time() - t_start
    print("Nx Time : " + str(nx_time))
    print("Speedup: " + str(nx_time / cugraph_time))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/tree/test_maximum_spanning_tree.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import gc
import rmm
import pytest
import numpy as np
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils
from cugraph.datasets import netscience
# Logged once at import time; useful when triaging CI failures
print("Networkx version : {} ".format(nx.__version__))

# Weighted, undirected input datasets exercised by the tests below
UNDIRECTED_WEIGHTED_DATASET = [netscience]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()
def _get_param_args(param_name, param_values):
"""
Returns a tuple of (<param_name>, <pytest.param list>) which can be applied
as the args to pytest.mark.parametrize(). The pytest.param list also
contains param id string formed from the param name and values.
"""
return (param_name, [pytest.param(v, id=f"{param_name}={v}") for v in param_values])
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_WEIGHTED_DATASET)
def test_maximum_spanning_tree_nx(graph_file):
    """Compare cugraph.maximum_spanning_tree() against networkx on one dataset."""
    # cugraph
    G = graph_file.get_graph()
    # read_weights_in_sp=False => value column dtype is float64
    G.edgelist.edgelist_df["weights"] = G.edgelist.edgelist_df["weights"].astype(
        "float64"
    )
    # Materialize the adjacency list up front so the timing covers only MST
    G.view_adj_list()

    start = time.time()
    cugraph_mst = cugraph.maximum_spanning_tree(G)
    print("CuGraph time : " + str(time.time() - start))

    # Nx
    df = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        df, create_using=nx.Graph(), source="0", target="1", edge_attr="weight"
    )
    start = time.time()
    mst_nx = nx.maximum_spanning_tree(Gnx)
    print("Nx Time : " + str(time.time() - start))

    utils.compare_mst(cugraph_mst, mst_nx)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_WEIGHTED_DATASET)
@pytest.mark.parametrize(*_get_param_args("use_adjlist", [True, False]))
def test_maximum_spanning_tree_graph_repr_compat(graph_file, use_adjlist):
    """MST must run whether or not the adjacency list was materialized first."""
    G = graph_file.get_graph()
    # read_weights_in_sp=False => value column dtype is float64
    weights = G.edgelist.edgelist_df["weights"]
    G.edgelist.edgelist_df["weights"] = weights.astype("float64")
    if use_adjlist:
        G.view_adj_list()
    cugraph.maximum_spanning_tree(G)
# Edge counts for the large randomized benchmark below (which is skipped by
# default via pytest.mark.skip)
DATASETS_SIZES = [
    100000,
    1000000,
    10000000,
    100000000,
]
@pytest.mark.sg
@pytest.mark.skip(reason="Skipping large tests")
@pytest.mark.parametrize("graph_size", DATASETS_SIZES)
def test_random_maximum_spanning_tree_nx(graph_size):
    """Benchmark cugraph vs networkx maximum spanning tree on random data."""
    # Managed memory allows graphs larger than device memory to page in/out
    rmm.reinitialize(managed_memory=True)
    df = utils.random_edgelist(
        e=graph_size,
        ef=16,
        dtypes={"src": np.int32, "dst": np.int32, "weight": float},
        drop_duplicates=True,
        seed=123456,
    )

    # cugraph
    G = cugraph.Graph()
    G.from_cudf_edgelist(
        cudf.from_pandas(df), source="src", destination="dst", edge_attr="weight"
    )
    # Just for getting relevant timing
    G.view_adj_list()
    t_start = time.time()
    cugraph.maximum_spanning_tree(G)
    cugraph_time = time.time() - t_start
    print("CuGraph time : " + str(cugraph_time))

    # Nx
    Gnx = nx.from_pandas_edgelist(
        df,
        create_using=nx.Graph(),
        source="src",
        target="dst",
        edge_attr="weight",
    )
    t_start = time.time()
    nx.maximum_spanning_tree(Gnx)
    nx_time = time.time() - t_start
    print("Nx Time : " + str(nx_time))
    print("Speedup: " + str(nx_time / cugraph_time))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_edge_betweenness_centrality_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
from pylibcugraph.testing.utils import gen_fixture_params_product
from cugraph.datasets import karate, dolphins
import cugraph
import cugraph.dask as dcg
# from cugraph.dask.common.mg_utils import is_single_gpu
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()
# Parameter value lists combined into fixture_params below
IS_DIRECTED = [True, False]
INCLUDE_WEIGHTS = [False, True]
INCLUDE_EDGE_IDS = [False, True]
NORMALIZED_OPTIONS = [False, True]
# None => no vertex sampling (presumably exact BC over all vertices; see
# cugraph.edge_betweenness_centrality docs)
SUBSET_SIZE_OPTIONS = [4, None]
# email_Eu_core is too expensive to test
datasets = [karate, dolphins]
# =============================================================================
# Pytest fixtures
# =============================================================================
# Cartesian product of all parameter lists above; each element parametrizes
# the module-scoped input_combo fixture below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (INCLUDE_WEIGHTS, "include_weights"),
    (INCLUDE_EDGE_IDS, "include_edgeids"),
    (NORMALIZED_OPTIONS, "normalized"),
    (SUBSET_SIZE_OPTIONS, "subset_size"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    # fixture_params yields exactly 6 values per combination (one per tuple
    # passed to gen_fixture_params_product above). The original name tuple
    # also listed "subset_seed", which zip() silently dropped since there was
    # no 7th value — the seed is hard-coded where it is used instead.
    parameters = dict(
        zip(
            (
                "graph_file",
                "directed",
                "include_weights",
                "include_edge_ids",
                "normalized",
                "subset_size",
            ),
            request.param,
        )
    )

    return parameters
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the edge
    betweenness centrality algo.
    (based on cuGraph edge betweenness centrality) which can be used
    for validation.

    Returns None when the combination is unsupported (edge ids on an
    undirected graph).
    """
    directed = input_combo["directed"]
    normalized = input_combo["normalized"]
    k = input_combo["subset_size"]
    # Fixed seed so the sampled vertex subset is reproducible across runs
    subset_seed = 42
    edge_ids = input_combo["include_edge_ids"]
    weight = input_combo["include_weights"]
    df = input_combo["graph_file"].get_edgelist()
    if edge_ids:
        if not directed:
            # Edge ids not supported for undirected graph
            return
        # Cast the whole frame (incl. the new edge_id column) to the first
        # column's dtype.
        # NOTE(review): assumes all columns can share that dtype — confirm.
        dtype = df.dtypes[0]
        edge_id = "edge_id"
        df["edge_id"] = df.index
        df = df.astype(dtype)
    else:
        edge_id = None
    G = cugraph.Graph(directed=directed)
    G.from_cudf_edgelist(
        df, source="src", destination="dst", weight="wgt", edge_id=edge_id
    )
    if isinstance(k, int):
        # Replace the requested subset size with an actual vertex sample
        k = G.select_random_vertices(subset_seed, k)
    input_combo["k"] = k
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    sg_cugraph_edge_bc = (
        cugraph.edge_betweenness_centrality(G, k, normalized)
        .sort_values(["src", "dst"])
        .reset_index(drop=True)
    )
    input_data_path = input_combo["graph_file"].get_path()
    input_combo["sg_cugraph_results"] = sg_cugraph_edge_bc
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    if weight:
        # Pass the entire edgelist frame as 'weight'; the MG test below relies
        # on this triggering NotImplementedError.
        weight = ddf
    else:
        weight = None
    if edge_ids:
        dtype = ddf.dtypes[0]
        edge_id = "edge_id"
        # Build a 0-based edge id column (cumsum of ones, shifted by 1)
        ddf = ddf.assign(idx=1)
        ddf["edge_id"] = ddf.idx.cumsum().astype(dtype) - 1
    else:
        edge_id = None
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        weight="value",
        edge_id=edge_id,
        renumber=True,
    )
    input_combo["MGGraph"] = dg
    input_combo["include_weights"] = weight
    return input_combo
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_dask_mg_edge_betweenness_centrality(
    dask_client, benchmark, input_expected_output
):
    """
    Compare MG edge betweenness centrality against the SG results computed by
    the input_expected_output fixture. Weighted runs must raise
    NotImplementedError.
    """
    # The fixture returns None for unsupported combinations (edge ids on an
    # undirected graph); nothing to test in that case.
    if input_expected_output is not None:
        dg = input_expected_output["MGGraph"]
        k = input_expected_output["k"]
        normalized = input_expected_output["normalized"]
        weight = input_expected_output["include_weights"]
        if weight is not None:
            # Weighted MG edge BC is not implemented
            with pytest.raises(NotImplementedError):
                result_edge_bc = benchmark(
                    dcg.edge_betweenness_centrality, dg, k, normalized, weight=weight
                )
        else:
            result_edge_bc = benchmark(
                dcg.edge_betweenness_centrality, dg, k, normalized, weight=weight
            )
            result_edge_bc = (
                result_edge_bc.compute()
                .sort_values(["src", "dst"])
                .reset_index(drop=True)
                .rename(columns={"betweenness_centrality": "mg_betweenness_centrality"})
            )
            if len(result_edge_bc.columns) > 3:
                result_edge_bc = result_edge_bc.rename(
                    columns={"edge_id": "mg_edge_id"}
                )
            expected_output = input_expected_output["sg_cugraph_results"].reset_index(
                drop=True
            )
            # Place SG and MG scores side by side for row-wise comparison
            result_edge_bc["betweenness_centrality"] = expected_output[
                "betweenness_centrality"
            ]
            if len(expected_output.columns) > 3:
                result_edge_bc["edge_id"] = expected_output["edge_id"]
                edge_id_diff = result_edge_bc.query("mg_edge_id != edge_id")
                assert len(edge_id_diff) == 0
            # Flag rows where MG overshoots SG by more than 0.01 ...
            edge_bc_diffs1 = result_edge_bc.query(
                "mg_betweenness_centrality - betweenness_centrality > 0.01"
            )
            # ... and rows where MG undershoots SG by more than 0.01.
            # (The original used "< -0.01" with the operands flipped, which is
            # algebraically the same condition as diffs1 and never checked the
            # lower bound.)
            edge_bc_diffs2 = result_edge_bc.query(
                "betweenness_centrality - mg_betweenness_centrality > 0.01"
            )
            assert len(edge_bc_diffs1) == 0
            assert len(edge_bc_diffs2) == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_katz_centrality.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import (
utils,
DEFAULT_DATASETS,
UNDIRECTED_DATASETS,
)
from cugraph.datasets import toy_graph_undirected, karate
# This toy graph is used in multiple tests throughout libcugraph_c and pylib.
# Short alias for readability in the parametrize lists below.
TOY = toy_graph_undirected
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()
def topKVertices(katz, col, k):
    """Return the vertex ids of the k highest rows of *katz* ranked by *col*,
    ordered best-first."""
    ranked = katz.nlargest(n=k, columns=col)
    ranked = ranked.sort_values(by=col, ascending=False)
    return ranked["vertex"]
def calc_katz(graph_file):
    """
    Run cugraph and networkx Katz centrality on *graph_file* and return one
    DataFrame with both scores per vertex ('cu_katz' and 'nx_katz' columns).
    """
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=True), ignore_weights=True
    )
    # networkx gets alpha = 1/max_degree explicitly; cugraph gets alpha=None
    # NOTE(review): presumably cugraph derives an equivalent alpha internally
    # when alpha=None — confirm against cugraph.katz_centrality docs.
    degree_max = G.degree()["degree"].max()
    katz_alpha = 1 / (degree_max)
    k_df = cugraph.katz_centrality(G, alpha=None, max_iter=1000)
    k_df = k_df.sort_values("vertex").reset_index(drop=True)
    dataset_path = graph_file.get_path()
    NM = utils.read_csv_for_nx(dataset_path)
    Gnx = nx.from_pandas_edgelist(NM, create_using=nx.DiGraph(), source="0", target="1")
    nk = nx.katz_centrality(Gnx, alpha=katz_alpha)
    # Align nx scores to the vertex-sorted cugraph frame
    pdf = [nk[k] for k in sorted(nk.keys())]
    k_df["nx_katz"] = pdf
    k_df = k_df.rename(columns={"katz_centrality": "cu_katz"}, copy=False)
    return k_df
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_katz_centrality(graph_file):
    """The top-10 Katz ranking must agree between cugraph and networkx."""
    scores = calc_katz(graph_file)
    nx_top = topKVertices(scores, "nx_katz", 10)
    cu_top = topKVertices(scores, "cu_katz", 10)
    assert nx_top.equals(cu_top)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_katz_centrality_nx(graph_file):
    """Katz scores computed through the nx-compat path must track networkx."""
    edgelist = utils.read_csv_for_nx(graph_file.get_path())
    Gnx = nx.from_pandas_edgelist(
        edgelist,
        create_using=nx.DiGraph(),
        source="0",
        target="1",
    )
    # Alpha for the networkx run is derived from the converted cugraph graph
    G = cugraph.utilities.convert_from_nx(Gnx)
    katz_alpha = 1 / G.degree()["degree"].max()
    nk = nx.katz_centrality(Gnx, alpha=katz_alpha)
    ck = cugraph.katz_centrality(Gnx, alpha=None, max_iter=1000)
    # Calculating mismatch
    nx_scores = sorted(nk.items(), key=lambda kv: kv[0])
    cu_scores = sorted(ck.items(), key=lambda kv: kv[0])
    assert len(cu_scores) == len(nx_scores)
    err = sum(
        1
        for (cu_v, cu_s), (nx_v, nx_s) in zip(cu_scores, nx_scores)
        if abs(cu_s - nx_s) > 0.1 and cu_v == nx_v
    )
    print("Mismatches:", err)
    assert err < (0.1 * len(cu_scores))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_katz_centrality_multi_column(graph_file):
    """
    Katz on a multi-column-vertex graph must rank vertices the same as the
    equivalent single-column graph.
    """
    dataset_path = graph_file.get_path()
    cu_M = utils.read_csv_file(dataset_path)
    cu_M.rename(columns={"0": "src_0", "1": "dst_0"}, inplace=True)
    # Second vertex-id column is just the first one offset by 1000
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000
    G1 = cugraph.Graph(directed=True)
    G1.from_cudf_edgelist(
        cu_M,
        source=["src_0", "src_1"],
        destination=["dst_0", "dst_1"],
        store_transposed=True,
    )
    G2 = cugraph.Graph(directed=True)
    G2.from_cudf_edgelist(
        cu_M, source="src_0", destination="dst_0", store_transposed=True
    )
    # Single-column result is the expected baseline
    k_df_exp = cugraph.katz_centrality(G2, alpha=None, max_iter=1000)
    k_df_exp = k_df_exp.sort_values("vertex").reset_index(drop=True)
    # Seed the multi-column run with the baseline scores via nstart
    nstart = cudf.DataFrame()
    nstart["vertex_0"] = k_df_exp["vertex"]
    nstart["vertex_1"] = nstart["vertex_0"] + 1000
    nstart["values"] = k_df_exp["katz_centrality"]
    k_df_res = cugraph.katz_centrality(G1, nstart=nstart, alpha=None, max_iter=1000)
    # Multi-column results come back with a "0_vertex" column; normalize it
    k_df_res = k_df_res.sort_values("0_vertex").reset_index(drop=True)
    k_df_res.rename(columns={"0_vertex": "vertex"}, inplace=True)
    top_res = topKVertices(k_df_res, "katz_centrality", 10)
    top_exp = topKVertices(k_df_exp, "katz_centrality", 10)
    assert top_res.equals(top_exp)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [TOY])
def test_katz_centrality_toy(graph_file):
    """Check Katz scores on a tiny fixed graph against known-good values."""
    # This test is based off of libcugraph_c and pylibcugraph tests
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=True), download=True)
    expected_centralities = [0.410614, 0.403211, 0.390689, 0.415175, 0.395125, 0.433226]

    result = cugraph.katz_centrality(
        G, alpha=0.01, beta=1.0, tol=0.000001, max_iter=1000
    )
    result = result.sort_values("vertex")
    for vertex in result["vertex"].to_pandas():
        expected_score = expected_centralities[vertex]
        actual_score = result["katz_centrality"].iloc[vertex]
        assert pytest.approx(expected_score, abs=1e-2) == actual_score, (
            f"Katz centrality score is {actual_score}, should have"
            f"been {expected_score}"
        )
@pytest.mark.sg
def test_katz_centrality_transposed_false():
    """Running Katz on a non-transposed graph must emit a performance warning."""
    G = karate.get_graph(create_using=cugraph.Graph(directed=True))
    expected_warning = (
        "Katz centrality expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        cugraph.katz_centrality(G)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_katz_centrality_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()


# Graph directedness values exercised by the parametrized tests below
IS_DIRECTED = [True, False]
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_katz_centrality(dask_client, directed):
    """Compare MG Katz centrality against a networkx reference on karate."""
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # NOTE(review): the 'directed' parameter is NOT used here — the MG graph is
    # always built with directed=True, while the nx reference below honors it.
    # Presumably this passes because karate.csv is symmetric; confirm intent.
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True)
    # Same alpha is fed to both implementations
    degree_max = dg.degree()["degree"].max().compute()
    katz_alpha = 1 / (degree_max)
    mg_res = dcg.katz_centrality(dg, alpha=katz_alpha, tol=1e-6)
    mg_res = mg_res.compute()
    import networkx as nx
    from cugraph.testing import utils

    NM = utils.read_csv_for_nx(input_data_path)
    if directed:
        Gnx = nx.from_pandas_edgelist(
            NM, create_using=nx.DiGraph(), source="0", target="1"
        )
    else:
        Gnx = nx.from_pandas_edgelist(
            NM, create_using=nx.Graph(), source="0", target="1"
        )
    nk = nx.katz_centrality(Gnx, alpha=katz_alpha)
    import pandas as pd

    pdf = pd.DataFrame(nk.items(), columns=["vertex", "katz_centrality"])
    exp_res = cudf.DataFrame(pdf)
    # Count per-vertex score differences beyond tolerance
    err = 0
    tol = 1.0e-05
    compare_res = exp_res.merge(mg_res, on="vertex", suffixes=["_local", "_dask"])
    for i in range(len(compare_res)):
        diff = abs(
            compare_res["katz_centrality_local"].iloc[i]
            - compare_res["katz_centrality_dask"].iloc[i]
        )
        if diff > tol * 1.1:
            err = err + 1
    assert err == 0
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_katz_centrality_nstart(dask_client, directed):
    """MG Katz with an nstart guess must converge to the same scores."""
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # NOTE(review): the 'directed' parameter is not used — the MG graph is
    # always built with directed=True; confirm whether the parametrization is
    # intentional here.
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True)
    mg_res = dcg.katz_centrality(dg, max_iter=50, tol=1e-6)
    mg_res = mg_res.compute()
    # Build an nstart frame ("vertex", "values") with a flat initial guess
    estimate = mg_res.copy()
    estimate = estimate.rename(
        columns={"vertex": "vertex", "katz_centrality": "values"}
    )
    estimate["values"] = 0.5
    mg_estimate_res = dcg.katz_centrality(dg, nstart=estimate, max_iter=50, tol=1e-6)
    mg_estimate_res = mg_estimate_res.compute()
    # Count per-vertex score differences beyond tolerance
    err = 0
    tol = 1.0e-05
    compare_res = mg_res.merge(
        mg_estimate_res, on="vertex", suffixes=["_dask", "_nstart"]
    )
    for i in range(len(compare_res)):
        diff = abs(
            compare_res["katz_centrality_dask"].iloc[i]
            - compare_res["katz_centrality_nstart"].iloc[i]
        )
        if diff > tol * 1.1:
            err = err + 1
    assert err == 0
@pytest.mark.mg
def test_dask_mg_katz_centrality_transposed_false(dask_client):
    """Running MG Katz on a non-transposed graph must emit a warning."""
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    edgelist_ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=dcg.get_chunksize(input_data_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(edgelist_ddf, "src", "dst", store_transposed=False)
    expected_warning = (
        "Katz centrality expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        dcg.katz_centrality(dg)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS, DEFAULT_DATASETS
from cugraph.datasets import toy_graph, karate
# This toy graph is used in multiple tests throughout libcugraph_c and pylib.
# Short alias for readability in the parametrize lists below.
TOY = toy_graph
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory held by objects from previous tests before each test
    gc.collect()
def topKVertices(eigen, col, k):
    """Return the vertex ids of the k highest rows of *eigen* ranked by *col*,
    ordered best-first."""
    ranked = eigen.nlargest(n=k, columns=col)
    ranked = ranked.sort_values(by=col, ascending=False)
    return ranked["vertex"]
def calc_eigenvector(graph_file):
    """
    Run cugraph and networkx eigenvector centrality on *graph_file* and return
    one DataFrame with both scores per vertex ('cu_eigen' and 'nx_eigen').
    """
    dataset_path = graph_file.get_path()
    G = graph_file.get_graph(
        download=True, create_using=cugraph.Graph(directed=True), ignore_weights=True
    )
    k_df = cugraph.eigenvector_centrality(G, max_iter=1000)
    k_df = k_df.sort_values("vertex").reset_index(drop=True)
    NM = utils.read_csv_for_nx(dataset_path)
    Gnx = nx.from_pandas_edgelist(NM, create_using=nx.DiGraph(), source="0", target="1")
    nk = nx.eigenvector_centrality(Gnx)
    # Align nx scores to the vertex-sorted cugraph frame
    pdf = [nk[k] for k in sorted(nk.keys())]
    k_df["nx_eigen"] = pdf
    k_df = k_df.rename(columns={"eigenvector_centrality": "cu_eigen"}, copy=False)
    return k_df
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_eigenvector_centrality(graph_file):
    """The top-10 eigenvector ranking must agree between cugraph and networkx."""
    scores = calc_eigenvector(graph_file)
    nx_top = topKVertices(scores, "nx_eigen", 10)
    cu_top = topKVertices(scores, "cu_eigen", 10)
    assert nx_top.equals(cu_top)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_eigenvector_centrality_nx(graph_file):
    """Run cuGraph eigenvector centrality directly on a NetworkX graph and
    compare the scores with NetworkX's own implementation.
    """
    dataset_path = graph_file.get_path()
    NM = utils.read_csv_for_nx(dataset_path)
    Gnx = nx.from_pandas_edgelist(
        NM,
        create_using=nx.DiGraph(),
        source="0",
        target="1",
    )
    nk = nx.eigenvector_centrality(Gnx)
    # cuGraph accepts the NetworkX graph directly here.
    ck = cugraph.eigenvector_centrality(Gnx)
    # Calculating mismatch
    nk = sorted(nk.items(), key=lambda x: x[0])
    ck = sorted(ck.items(), key=lambda x: x[0])
    err = 0
    assert len(ck) == len(nk)
    for i in range(len(ck)):
        # NOTE(review): the 'and' only counts a mismatch when the vertex ids
        # at position i agree -- confirm this filter is intended.
        if abs(ck[i][1] - nk[i][1]) > 0.1 and ck[i][0] == nk[i][0]:
            err = err + 1
    print("Mismatches:", err)
    # Allow up to 10% of vertices to disagree beyond the 0.1 tolerance.
    assert err < (0.1 * len(ck))
# TODO: Uncomment this test when/if nstart is supported for eigen centrality
"""
@pytest.mark.parametrize("graph_file", utils.DATASETS_UNDIRECTED)
def test_eigenvector_centrality_multi_column(graph_file):
cu_M = utils.read_csv_file(graph_file)
cu_M.rename(columns={'0': 'src_0', '1': 'dst_0'}, inplace=True)
cu_M['src_1'] = cu_M['src_0'] + 1000
cu_M['dst_1'] = cu_M['dst_0'] + 1000
G1 = cugraph.Graph(directed=True)
G1.from_cudf_edgelist(cu_M, source=["src_0", "src_1"],
destination=["dst_0", "dst_1"],
store_transposed=True)
G2 = cugraph.Graph(directed=True)
G2.from_cudf_edgelist(
cu_M, source="src_0", destination="dst_0", store_transposed=True)
k_df_exp = cugraph.eigenvector_centrality(G2)
k_df_exp = k_df_exp.sort_values("vertex").reset_index(drop=True)
nstart = cudf.DataFrame()
nstart['vertex_0'] = k_df_exp['vertex']
nstart['vertex_1'] = nstart['vertex_0'] + 1000
nstart['values'] = k_df_exp['eigenvector_centrality']
k_df_res = cugraph.eigenvector_centrality(G1, nstart=nstart)
k_df_res = k_df_res.sort_values("0_vertex").reset_index(drop=True)
k_df_res.rename(columns={'0_vertex': 'vertex'}, inplace=True)
top_res = topKVertices(k_df_res, "eigenvector_centrality", 10)
top_exp = topKVertices(k_df_exp, "eigenvector_centrality", 10)
assert top_res.equals(top_exp)
"""
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [TOY])
def test_eigenvector_centrality_toy(graph_file):
    """Check eigenvector centrality on the toy graph against the reference
    scores used by the libcugraph_c and pylibcugraph test suites.
    """
    G = graph_file.get_graph(download=True, create_using=cugraph.Graph(directed=True))
    centralities = [0.236325, 0.292055, 0.458457, 0.60533, 0.190498, 0.495942]
    ck = cugraph.eigenvector_centrality(G, tol=1e-6, max_iter=200).sort_values("vertex")
    for vertex in ck["vertex"].to_pandas():
        expected_score = centralities[vertex]
        actual_score = ck["eigenvector_centrality"].iloc[vertex]
        assert pytest.approx(expected_score, abs=1e-4) == actual_score, (
            f"Eigenvector centrality score is {actual_score}, should have"
            f" been {expected_score}"
        )
@pytest.mark.sg
def test_eigenvector_centrality_transposed_false():
    """A graph built without store_transposed=True must trigger the
    performance UserWarning when eigenvector centrality is run on it.
    """
    G = karate.get_graph(create_using=cugraph.Graph(directed=True))
    warning_msg = (
        "Eigenvector centrality expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=warning_msg):
        cugraph.eigenvector_centrality(G)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
import cupy
import cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Run a garbage-collection pass before each test so memory held by
    # previously-freed objects is reclaimed up front.
    gc.collect()
IS_DIRECTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED
fixture_params = gen_fixture_params_product(
(datasets, "graph_file"),
([False, True], "normalized"),
([False, True], "endpoints"),
([42, None], "subset_seed"),
([None, 15], "subset_size"),
(IS_DIRECTED, "directed"),
([list, cudf], "vertex_list_type"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.
    """
    keys = (
        "graph_file",
        "normalized",
        "endpoints",
        "subset_seed",
        "subset_size",
        "directed",
        "vertex_list_type",
    )
    return {name: value for name, value in zip(keys, request.param)}
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the
    betweenness_centrality algo (based on cuGraph betweenness_centrality)
    which can be used for validation.
    """
    input_data_path = input_combo["graph_file"]
    normalized = input_combo["normalized"]
    endpoints = input_combo["endpoints"]
    random_state = input_combo["subset_seed"]
    subset_size = input_combo["subset_size"]
    directed = input_combo["directed"]
    vertex_list_type = input_combo["vertex_list_type"]
    G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed)
    if subset_size is None:
        k = subset_size
    elif isinstance(subset_size, int):
        # Select random vertices
        k = G.select_random_vertices(
            random_state=random_state, num_vertices=subset_size
        )
        if vertex_list_type is list:
            k = k.to_arrow().to_pylist()
        print("the seeds are \n", k)
    if vertex_list_type is int:
        # This internally sample k vertices in betweenness centrality.
        # Since the nodes that will be sampled by each implementation will
        # be random, therefore sample all vertices which will make the test
        # consistent.
        k = len(G.nodes())
    input_combo["k"] = k
    # Single-GPU reference run used to validate the MG results.
    sg_cugraph_bc = cugraph.betweenness_centrality(
        G, k=k, normalized=normalized, endpoints=endpoints, random_state=random_state
    )
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    sg_cugraph_bc = sg_cugraph_bc.sort_values("vertex").reset_index(drop=True)
    input_combo["sg_cugraph_results"] = sg_cugraph_bc
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_dask_mg_betweenness_centrality(dask_client, benchmark, input_expected_output):
    """Benchmark MG betweenness centrality and validate the scores against the
    single-GPU cuGraph results computed by the input_expected_output fixture.
    """
    dg = input_expected_output["MGGraph"]
    k = input_expected_output["k"]
    endpoints = input_expected_output["endpoints"]
    normalized = input_expected_output["normalized"]
    random_state = input_expected_output["subset_seed"]
    mg_bc_results = benchmark(
        dcg.betweenness_centrality,
        dg,
        k=k,
        normalized=normalized,
        endpoints=endpoints,
        random_state=random_state,
    )
    mg_bc_results = (
        mg_bc_results.compute().sort_values("vertex").reset_index(drop=True)
    )["betweenness_centrality"].to_cupy()
    sg_bc_results = (
        input_expected_output["sg_cugraph_results"]
        .sort_values("vertex")
        .reset_index(drop=True)
    )["betweenness_centrality"].to_cupy()
    # Element-wise closeness; every vertex score must match within the
    # default cupy.isclose tolerances.
    diff = cupy.isclose(mg_bc_results, sg_bc_results)
    assert diff.all()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_degree_centrality.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Run a garbage-collection pass before each test so memory held by
    # previously-freed objects is reclaimed up front.
    gc.collect()
def topKVertices(degree, col, k):
    """Return the vertex ids of the k rows with the largest *col* values,
    ordered best-first."""
    top = degree.nlargest(n=k, columns=col).sort_values(by=col, ascending=False)
    return top["vertex"]
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_degree_centrality_nx(graph_file):
    """Compare cuGraph degree centrality with NetworkX on a graph converted
    from a NetworkX DiGraph.
    """
    dataset_path = graph_file.get_path()
    NM = utils.read_csv_for_nx(dataset_path)
    Gnx = nx.from_pandas_edgelist(
        NM,
        create_using=nx.DiGraph(),
        source="0",
        target="1",
    )
    G = cugraph.utilities.convert_from_nx(Gnx)
    nk = nx.degree_centrality(Gnx)
    ck = cugraph.degree_centrality(G)
    # Calculating mismatch
    nk = sorted(nk.items(), key=lambda x: x[0])
    # Index the cuGraph result by vertex so positional iteration below lines
    # up with the vertex-sorted NetworkX pairs.
    ck = ck.sort_values("vertex")
    ck.index = ck["vertex"]
    ck = ck["degree_centrality"]
    err = 0
    assert len(ck) == len(nk)
    for i in range(len(ck)):
        # NOTE(review): the 'and' only counts a mismatch when the vertex ids
        # agree at position i -- confirm this filter is intended.
        if abs(ck[i] - nk[i][1]) > 0.1 and ck.index[i] == nk[i][0]:
            err = err + 1
    print("Mismatches:", err)
    # Allow up to 10% of vertices to disagree beyond the 0.1 tolerance.
    assert err < (0.1 * len(ck))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_degree_centrality_multi_column(graph_file):
    """Degree centrality on a graph whose vertices are identified by two
    columns must rank vertices identically to the same graph keyed by a
    single column.
    """
    dataset_path = graph_file.get_path()
    cu_M = utils.read_csv_file(dataset_path)
    cu_M.rename(columns={"0": "src_0", "1": "dst_0"}, inplace=True)
    # Derive a second vertex column so each vertex is identified by a pair.
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000
    G1 = cugraph.Graph(directed=True)
    G1.from_cudf_edgelist(
        cu_M, source=["src_0", "src_1"], destination=["dst_0", "dst_1"]
    )
    G2 = cugraph.Graph(directed=True)
    G2.from_cudf_edgelist(cu_M, source="src_0", destination="dst_0")
    k_df_exp = cugraph.degree_centrality(G2)
    k_df_exp = k_df_exp.sort_values("vertex").reset_index(drop=True)
    # Removed dead code: an 'nstart' DataFrame was built here but never
    # passed anywhere -- cugraph.degree_centrality takes no nstart argument.
    k_df_res = cugraph.degree_centrality(G1)
    k_df_res = k_df_res.sort_values("0_vertex").reset_index(drop=True)
    k_df_res.rename(columns={"0_vertex": "vertex"}, inplace=True)
    top_res = topKVertices(k_df_res, "degree_centrality", 10)
    top_exp = topKVertices(k_df_exp, "degree_centrality", 10)
    assert top_res.equals(top_exp)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_betweenness_centrality.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import random
import numpy as np
import networkx as nx
import cudf
import cupy
import cugraph
from cugraph.datasets import karate_disjoint
from cugraph.testing import utils, SMALL_DATASETS
# =============================================================================
# Parameters
# =============================================================================
DIRECTED_GRAPH_OPTIONS = [False, True]
WEIGHTED_GRAPH_OPTIONS = [False, True]
ENDPOINTS_OPTIONS = [False, True]
NORMALIZED_OPTIONS = [False, True]
DEFAULT_EPSILON = 0.0001
SUBSET_SIZE_OPTIONS = [4, None]
SUBSET_SEED_OPTIONS = [42]
# NOTE: The following is not really being exploited in the tests as the
# datasets that are used are too small to compare, but it ensures that both
# path are actually sane
RESULT_DTYPE_OPTIONS = [np.float32, np.float64]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Run a garbage-collection pass before each test so memory held by
    # previously-freed objects is reclaimed up front.
    gc.collect()
# =============================================================================
# Comparison functions
# =============================================================================
def calc_betweenness_centrality(
    graph_file,
    directed=True,
    k=None,
    normalized=False,
    weight=None,
    endpoints=False,
    seed=None,
    result_dtype=np.float64,
    use_k_full=False,
    multi_gpu_batch=False,
    edgevals=False,
):
    """Generate both cugraph and networkx betweenness centrality
    Parameters
    ----------
    graph_file : string
        Path to COO Graph representation in .csv format
    directed : bool, optional, default=True
    k : int or None, optional, default=None
        int: Number of sources to sample from
        None: All sources are used to compute
    normalized : bool
        True: Normalize Betweenness Centrality scores
        False: Scores are left unnormalized
    weight : cudf.DataFrame:
        Not supported as of 06/2020
    endpoints : bool
        True: Endpoints are included when computing scores
        False: Endpoints are not considered
    seed : int or None, optional, default=None
        Seed for random sampling of the starting point
    result_dtype : numpy.dtype
        Expected type of the result, either np.float32 or np.float64
    use_k_full : bool
        When True, if k is None replaces k by the number of sources of the
        Graph
    multi_gpu_batch : bool
        When True, enable mg batch after constructing the graph
    edgevals: bool
        When True, enable tests with weighted graph, should be ignored
        during computation.
    Returns
    -------
    sorted_df : cudf.DataFrame
        Contains 'vertex' and 'cu_bc' 'ref_bc' columns, where 'cu_bc'
        and 'ref_bc' are the two betweenness centrality scores to compare.
        The dataframe is expected to be sorted based on 'vertex', so that we
        can use cupy.isclose to compare the scores.
    """
    G = None
    Gnx = None
    if edgevals:
        edge_attr = "weight"
    else:
        edge_attr = None
    G = graph_file.get_graph(
        download=True,
        create_using=cugraph.Graph(directed=directed),
        ignore_weights=not edgevals,
    )
    # Build the NetworkX reference graph from the same edge list so both
    # implementations see identical topology (and weights when edgevals).
    M = G.to_pandas_edgelist().rename(
        columns={"src": "0", "dst": "1", "wgt": edge_attr}
    )
    Gnx = nx.from_pandas_edgelist(
        M,
        source="0",
        target="1",
        edge_attr=edge_attr,
        create_using=(nx.DiGraph() if directed else nx.Graph()),
    )
    assert G is not None and Gnx is not None
    if multi_gpu_batch:
        G.enable_batch()
    # Dispatch to the comparison helper matching the (k, seed) combination.
    calc_func = None
    if k is not None and seed is not None:
        calc_func = _calc_bc_subset
    elif k is not None:
        calc_func = _calc_bc_subset_fixed
    else:  # We proceed to a comparison using every source
        if use_k_full:
            k = Gnx.number_of_nodes()
        calc_func = _calc_bc_full
    sorted_df = calc_func(
        G,
        Gnx,
        k=k,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=seed,
        result_dtype=result_dtype,
    )
    return sorted_df
def _calc_bc_subset(G, Gnx, normalized, weight, endpoints, k, seed, result_dtype):
    """Compare cuGraph vs NetworkX betweenness centrality restricted to the
    same randomly-sampled k sources; returns a merged DataFrame with 'cu_bc'
    and 'ref_bc' columns.
    """
    # NOTE: Networkx API does not allow passing a list of vertices
    # And the sampling is operated on Gnx.nodes() directly
    # We first mimic acquisition of the nodes to compare with same sources
    random.seed(seed)  # It will be called again in nx's call
    sources = random.sample(list(Gnx.nodes()), k)
    print("\nsources are ", sources)
    df = cugraph.betweenness_centrality(
        G,
        k=sources,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        result_dtype=result_dtype,
    )
    sorted_df = (
        df.sort_values("vertex")
        .rename(columns={"betweenness_centrality": "cu_bc"}, copy=False)
        .reset_index(drop=True)
    )
    nx_bc = nx.betweenness_centrality(
        Gnx,
        k=k,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=seed,
    )
    # Sorting the nx dict by vertex id aligns it with the vertex-sorted rows.
    _, nx_bc = zip(*sorted(nx_bc.items()))
    nx_df = cudf.DataFrame({"ref_bc": nx_bc})
    merged_sorted_df = cudf.concat([sorted_df, nx_df], axis=1, sort=False)
    return merged_sorted_df
def _calc_bc_subset_fixed(G, Gnx, normalized, weight, endpoints, k, seed, result_dtype):
    """Compare cuGraph betweenness centrality against itself: once sampling k
    sources internally (seeded), once with the same sources passed explicitly.
    Returns a merged DataFrame with 'cu_bc' and 'ref_bc' columns.
    """
    assert isinstance(k, int), (
        "This test is meant for verifying coherence " "when k is given as an int"
    )
    # In the fixed set we compare cu_bc against itself as we random.seed(seed)
    # on the same seed and then sample on the number of vertices themselves
    if seed is None:
        seed = 123  # random.seed(None) uses time, but we want same sources
    random.seed(seed)  # It will be called again in cugraph's call
    sources = random.sample(range(G.number_of_vertices()), k)
    if G.renumbered:
        # Map internal offsets back to the caller-visible vertex ids.
        sources_df = cudf.DataFrame({"src": sources})
        sources = G.unrenumber(sources_df, "src")["src"].to_pandas().tolist()
    # The first call is going to proceed to the random sampling in the same
    # fashion as the lines above
    df = cugraph.betweenness_centrality(
        G,
        k=k,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=seed,
        result_dtype=result_dtype,
    )
    sorted_df = (
        df.sort_values("vertex")
        .rename(columns={"betweenness_centrality": "cu_bc"}, copy=False)
        .reset_index(drop=True)
    )
    # The second call is going to process source that were already sampled
    # We set seed to None as k : int, seed : not none should not be normal
    # behavior
    df2 = cugraph.betweenness_centrality(
        G,
        k=sources,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=None,
        result_dtype=result_dtype,
    )
    sorted_df2 = (
        df2.sort_values("vertex")
        .rename(columns={"betweenness_centrality": "ref_bc"}, copy=False)
        .reset_index(drop=True)
    )
    merged_sorted_df = cudf.concat(
        [sorted_df, sorted_df2["ref_bc"]], axis=1, sort=False
    )
    return merged_sorted_df
def _calc_bc_full(G, Gnx, normalized, weight, endpoints, k, seed, result_dtype):
    """Compare cuGraph vs NetworkX betweenness centrality using every source
    (or k sources when k was replaced by the full node count upstream).
    Returns a merged DataFrame with 'cu_bc' and 'ref_bc' columns.
    """
    df = cugraph.betweenness_centrality(
        G,
        k=k,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        result_dtype=result_dtype,
    )
    assert (
        df["betweenness_centrality"].dtype == result_dtype
    ), "'betweenness_centrality' column has not the expected type"
    nx_bc = nx.betweenness_centrality(
        Gnx, k=k, normalized=normalized, weight=weight, endpoints=endpoints
    )
    sorted_df = (
        df.sort_values("vertex")
        .rename(columns={"betweenness_centrality": "cu_bc"}, copy=False)
        .reset_index(drop=True)
    )
    # Sorting the nx dict by vertex id aligns it with the vertex-sorted rows.
    _, nx_bc = zip(*sorted(nx_bc.items()))
    nx_df = cudf.DataFrame({"ref_bc": nx_bc})
    merged_sorted_df = cudf.concat([sorted_df, nx_df], axis=1, sort=False)
    return merged_sorted_df
# =============================================================================
# Utils
# =============================================================================
# NOTE: We assume that both column are ordered in such way that values
# at ith positions are expected to be compared in both columns
# i.e: sorted_df[idx][first_key] should be compared to
# sorted_df[idx][second_key]
def compare_scores(sorted_df, first_key, second_key, epsilon=DEFAULT_EPSILON):
    """Assert that the two score columns agree element-wise within rtol
    *epsilon*; rows that disagree are printed before failing."""
    close = cupy.isclose(sorted_df[first_key], sorted_df[second_key], rtol=epsilon)
    mismatched_rows = sorted_df[~close]
    num_errors = len(mismatched_rows)
    if num_errors > 0:
        print(mismatched_rows)
    assert (
        num_errors == 0
    ), "Mismatch were found when comparing '{}' and '{}' (rtol = {})".format(
        first_key, second_key, epsilon
    )
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", [False, True])
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_betweenness_centrality(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    edgevals,
):
    """cuGraph and NetworkX betweenness centrality scores must agree for
    every parameter combination."""
    scores = calc_betweenness_centrality(
        graph_file,
        directed=directed,
        k=subset_size,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=subset_seed,
        result_dtype=result_dtype,
        edgevals=edgevals,
    )
    compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", [None])
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("use_k_full", [True])
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
@pytest.mark.skip(reason="Skipping large tests")
def test_betweenness_centrality_k_full(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    use_k_full,
    edgevals,
):
    """Tests full betweenness centrality by using k = G.number_of_vertices()
    instead of k=None, checks that k scales properly"""
    sorted_df = calc_betweenness_centrality(
        graph_file,
        directed=directed,
        normalized=normalized,
        k=subset_size,
        weight=weight,
        endpoints=endpoints,
        seed=subset_seed,
        result_dtype=result_dtype,
        use_k_full=use_k_full,
        edgevals=edgevals,
    )
    compare_scores(sorted_df, first_key="cu_bc", second_key="ref_bc")
# NOTE: This test should only be execute on unrenumbered datasets
# the function operating the comparison inside is first proceeding
# to a random sampling over the number of vertices (thus direct offsets)
# in the graph structure instead of actual vertices identifiers
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [karate_disjoint])
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", [None])
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
@pytest.mark.skip(reason="Skipping large tests")
def test_betweenness_centrality_fixed_sample(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    edgevals,
):
    """Test Betweenness Centrality using a subset
    Only k sources are considered for an approximate Betweenness Centrality
    """
    # k=subset_size with seed=None routes to _calc_bc_subset_fixed, which
    # requires an unrenumbered dataset (see module comment above).
    sorted_df = calc_betweenness_centrality(
        graph_file,
        directed=directed,
        k=subset_size,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=subset_seed,
        result_dtype=result_dtype,
        edgevals=edgevals,
    )
    compare_scores(sorted_df, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [[]])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
@pytest.mark.skip(reason="Skipping large tests")
def test_betweenness_centrality_weight_except(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    edgevals,
):
    """Calls betwenness_centrality with weight
    As of 05/28/2020, weight is not supported and should raise
    a NotImplementedError
    """
    with pytest.raises(NotImplementedError):
        sorted_df = calc_betweenness_centrality(
            graph_file,
            directed=directed,
            k=subset_size,
            normalized=normalized,
            weight=weight,
            endpoints=endpoints,
            seed=subset_seed,
            result_dtype=result_dtype,
            edgevals=edgevals,
        )
        # NOTE(review): unreachable when the expected NotImplementedError is
        # raised above; kept only as a guard should the call stop raising.
        compare_scores(sorted_df, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize("result_dtype", [str])
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_betweenness_invalid_dtype(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    edgevals,
):
    """Test calls edge_betwenness_centrality an invalid type"""
    with pytest.raises(TypeError):
        sorted_df = calc_betweenness_centrality(
            graph_file,
            directed=directed,
            k=subset_size,
            normalized=normalized,
            weight=weight,
            endpoints=endpoints,
            seed=subset_seed,
            result_dtype=result_dtype,
            edgevals=edgevals,
        )
        # NOTE(review): unreachable when the expected TypeError is raised
        # above; kept only as a guard should the call stop raising.
        compare_scores(sorted_df, first_key="cu_bc", second_key="ref_bc")
# FIXME: update the datasets API to return Nx graph
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_betweenness_centrality_nx(graph_file, directed, edgevals):
    """Run cuGraph betweenness centrality directly on a NetworkX graph and
    compare the scores with NetworkX's own implementation.
    """
    Gnx = utils.generate_nx_graph_from_file(graph_file, directed, edgevals)
    nx_bc = nx.betweenness_centrality(Gnx)
    cu_bc = cugraph.betweenness_centrality(Gnx)
    # Calculating mismatch
    networkx_bc = sorted(nx_bc.items(), key=lambda x: x[0])
    cugraph_bc = sorted(cu_bc.items(), key=lambda x: x[0])
    err = 0
    assert len(cugraph_bc) == len(networkx_bc)
    for i in range(len(cugraph_bc)):
        # NOTE(review): the 'and' only counts a mismatch when the vertex ids
        # agree at position i -- confirm this filter is intended.
        if (
            abs(cugraph_bc[i][1] - networkx_bc[i][1]) > 0.01
            and cugraph_bc[i][0] == networkx_bc[i][0]
        ):
            err = err + 1
            print(f"{cugraph_bc[i][1]} and {networkx_bc[i][1]}")
            print(f"{cugraph_bc[i][0]} and {networkx_bc[i][0]}")
    print("Mismatches:", err)
    # Allow up to 1% of vertices to disagree beyond the 0.01 tolerance.
    assert err < (0.01 * len(cugraph_bc))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_eigenvector_centrality_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.testing.utils import DATASETS
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Run a garbage-collection pass before each test so memory held by
    # previously-freed objects is reclaimed up front.
    gc.collect()
IS_DIRECTED = [True, False]
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("directed", IS_DIRECTED)
@pytest.mark.parametrize("input_data_path", DATASETS)
def test_dask_mg_eigenvector_centrality(dask_client, directed, input_data_path):
    """Compare MG (dask) eigenvector centrality against the NetworkX
    reference scores on the same dataset, for both directednesses.
    """
    input_data_path = input_data_path.as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # BUG FIX: honor the parametrized 'directed' flag. Previously this was
    # hard-coded to directed=True while the NetworkX reference graph below
    # respects 'directed', so the undirected case compared unlike graphs.
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=True)
    mg_res = dcg.eigenvector_centrality(dg, tol=1e-6)
    mg_res = mg_res.compute()
    import networkx as nx
    from cugraph.testing import utils
    NM = utils.read_csv_for_nx(input_data_path)
    if directed:
        Gnx = nx.from_pandas_edgelist(
            NM, create_using=nx.DiGraph(), source="0", target="1"
        )
    else:
        Gnx = nx.from_pandas_edgelist(
            NM, create_using=nx.Graph(), source="0", target="1"
        )
    # FIXME: Compare against cugraph instead of nx
    nk = nx.eigenvector_centrality(Gnx)
    import pandas as pd
    pdf = pd.DataFrame(nk.items(), columns=["vertex", "eigenvector_centrality"])
    exp_res = cudf.DataFrame(pdf)
    err = 0
    tol = 1.0e-05
    # Merge on vertex so the local (nx) and dask scores are row-aligned.
    compare_res = exp_res.merge(mg_res, on="vertex", suffixes=["_local", "_dask"])
    for i in range(len(compare_res)):
        diff = abs(
            compare_res["eigenvector_centrality_local"].iloc[i]
            - compare_res["eigenvector_centrality_dask"].iloc[i]
        )
        if diff > tol * 1.1:
            err = err + 1
    assert err == 0
@pytest.mark.mg
def test_dask_mg_eigenvector_centrality_transposed_false(dask_client):
    """An MG graph built with store_transposed=False must trigger the
    performance UserWarning when eigenvector centrality is run on it.
    """
    input_data_path = DATASETS[0]
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", store_transposed=False)
    warning_msg = (
        "Eigenvector centrality expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=warning_msg):
        dcg.eigenvector_centrality(dg)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_edge_betweenness_centrality.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import random
import networkx as nx
import numpy as np
import cupy
import cudf
import cugraph
from cugraph.datasets import karate_disjoint
from cugraph.testing import utils, SMALL_DATASETS
# NOTE: Endpoint parameter is not currently being tested, there could be a test
# to verify that python raise an error if it is used
# =============================================================================
# Parameters
# =============================================================================
DIRECTED_GRAPH_OPTIONS = [False, True]
WEIGHTED_GRAPH_OPTIONS = [False, True]
NORMALIZED_OPTIONS = [False, True]
DEFAULT_EPSILON = 0.0001
SUBSET_SIZE_OPTIONS = [4, None]
# NOTE: The following is not really being exploited in the tests as the
# datasets that are used are too small to compare, but it ensures that both
# path are actually sane
RESULT_DTYPE_OPTIONS = [np.float32, np.float64]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force a garbage-collection pass before each test to reclaim memory."""
    gc.collect()
# =============================================================================
# Comparison functions
# =============================================================================
def calc_edge_betweenness_centrality(
    graph_file,
    directed=True,
    k=None,
    normalized=False,
    weight=None,
    seed=None,
    result_dtype=np.float64,
    use_k_full=False,
    multi_gpu_batch=False,
    edgevals=False,
):
    """Generate both cugraph and networkx edge betweenness centrality.

    Parameters
    ----------
    graph_file : cugraph.datasets.Dataset
        Dataset object providing the COO graph (exposes get_path()/get_graph())
    k : int or None, optional, default=None
        int: Number of sources to sample from
        None: All sources are used to compute
    directed : bool, optional, default=True
    normalized : bool
        True: Normalize Betweenness Centrality scores
        False: Scores are left unnormalized
    weight : cudf.DataFrame
        Not supported as of 06/2020
    seed : int or None, optional, default=None
        Seed for random sampling of the starting point
    result_dtype : numpy.dtype
        Expected type of the result, either np.float32 or np.float64
    use_k_full : bool
        When True, if k is None replaces k by the number of sources of the
        Graph
    multi_gpu_batch : bool
        When True, enable mg batch after constructing the graph
    edgevals : bool
        When True, enable tests with weighted graph, should be ignored
        during computation.

    Returns
    -------
    sorted_df : cudf.DataFrame
        Contains 'src', 'dst', 'cu_bc' and 'ref_bc' columns, where 'cu_bc'
        and 'ref_bc' are the two betweenness centrality scores to compare.
        The dataframe is expected to be sorted based on 'src' then 'dst',
        so that we can use cupy.isclose to compare the scores.
    """
    G = None
    Gnx = None
    dataset_path = graph_file.get_path()
    # Build the reference (networkx) and device (cugraph) graphs from the
    # same dataset so their scores are directly comparable.
    Gnx = utils.generate_nx_graph_from_file(
        dataset_path, directed=directed, edgevals=edgevals
    )
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=directed), ignore_weights=not edgevals
    )
    assert G is not None and Gnx is not None
    if multi_gpu_batch:
        G.enable_batch()
    # Dispatch on how sources are selected:
    #   k + seed  -> sample the same sources on both sides (_calc_bc_subset)
    #   k only    -> compare cugraph against itself (_calc_bc_subset_fixed)
    #   otherwise -> use every source, optionally expressed as an explicit k
    if k is not None and seed is not None:
        calc_func = _calc_bc_subset
    elif k is not None:
        calc_func = _calc_bc_subset_fixed
    else:  # We proceed to a comparison using every source
        if use_k_full:
            k = Gnx.number_of_nodes()
        calc_func = _calc_bc_full
    sorted_df = calc_func(
        G,
        Gnx,
        k=k,
        normalized=normalized,
        weight=weight,
        seed=seed,
        result_dtype=result_dtype,
    )
    return sorted_df
def _rescale_e(betweenness, num_nodes, k):
    """Scale k-sample edge-betweenness estimates up to full-graph magnitude.

    networkx scales sampled estimates by k / num_nodes; multiplying every
    score by num_nodes / k undoes that so the values line up with cugraph's.
    """
    factor = num_nodes / k
    for edge in betweenness:
        betweenness[edge] *= factor
    return betweenness
def _calc_bc_subset(G, Gnx, normalized, weight, k, seed, result_dtype):
    """Compare cugraph vs networkx using the same k sampled sources."""
    # networkx samples its k sources internally from Gnx.nodes(); replicate
    # that sampling here (same seed, same population) so cugraph receives
    # the identical source list.
    random.seed(seed)  # networkx re-seeds with the same value in its call
    sampled_sources = random.sample(list(Gnx.nodes()), k)
    # The sample came from the networkx graph, so these are already external
    # vertex ids -- no unrenumbering is needed on the cugraph side.
    cu_df = cugraph.edge_betweenness_centrality(
        G,
        k=sampled_sources,
        normalized=normalized,
        weight=weight,
        result_dtype=result_dtype,
    )
    nx_scores = nx.edge_betweenness_centrality(
        Gnx, k=k, normalized=normalized, weight=weight, seed=seed
    )
    if normalized or not Gnx.is_directed():
        if k is not None:
            nx_scores = _rescale_e(nx_scores, len(Gnx.nodes()), k)
    ref_df = generate_nx_result(nx_scores, type(Gnx) is nx.DiGraph).rename(
        columns={"betweenness_centrality": "ref_bc"}, copy=False
    )
    return (
        cu_df.merge(ref_df, on=["src", "dst"])
        .rename(columns={"betweenness_centrality": "cu_bc"}, copy=False)
        .reset_index(drop=True)
    )
def _calc_bc_subset_fixed(G, Gnx, normalized, weight, k, seed, result_dtype):
    """Compare cugraph against itself when k is given as an int.

    The first call lets cugraph sample k sources internally from ``seed``;
    the second call passes the very same sources explicitly, so the two
    results ('cu_bc' vs 'ref_bc') must match.
    """
    assert isinstance(k, int), (
        "This test is meant for verifying coherence " "when k is given as an int"
    )
    # In the fixed set we compare cu_bc against itself as we random.seed(seed)
    # on the same seed and then sample on the number of vertices themselves
    if seed is None:
        seed = 123  # We want the same sources so we use the same seed when
        # randomly selecting vertices both below and internally(plc)
    sources = G.select_random_vertices(seed, k)
    if G.renumbered:
        # Translate the sampled ids back to the caller-visible (external)
        # ids before passing them as the explicit source list.
        sources_df = cudf.DataFrame({"src": sources})
        sources = G.unrenumber(sources_df, "src")["src"].to_pandas().tolist()
    # The first call is going to proceed to the random sampling in the same
    # fashion as the lines above
    df = cugraph.edge_betweenness_centrality(
        G,
        k=k,
        normalized=normalized,
        weight=weight,
        seed=seed,
        result_dtype=result_dtype,
    ).rename(columns={"betweenness_centrality": "cu_bc"}, copy=False)
    # The second call is going to process source that were already sampled
    # We set seed to None as k : int, seed : not none should not be normal
    # behavior
    df2 = (
        cugraph.edge_betweenness_centrality(
            G,
            k=sources,
            normalized=normalized,
            weight=weight,
            seed=None,
            result_dtype=result_dtype,
        )
        .rename(columns={"betweenness_centrality": "ref_bc"}, copy=False)
        .reset_index(drop=True)
    )
    merged_df = df.merge(df2, on=["src", "dst"]).reset_index(drop=True)
    return merged_df
def _calc_bc_full(G, Gnx, normalized, weight, k, seed, result_dtype):
    """Compare cugraph vs networkx edge betweenness over all sources."""
    cu_df = cugraph.edge_betweenness_centrality(
        G,
        k=k,
        normalized=normalized,
        weight=weight,
        seed=seed,
        result_dtype=result_dtype,
    )
    # The caller requested a specific score dtype; verify it was honored.
    assert (
        cu_df["betweenness_centrality"].dtype == result_dtype
    ), "'betweenness_centrality' column has not the expected type"
    nx_scores = nx.edge_betweenness_centrality(
        Gnx, k=k, normalized=normalized, seed=seed, weight=weight
    )
    ref_df = generate_nx_result(nx_scores, type(Gnx) is nx.DiGraph).rename(
        columns={"betweenness_centrality": "ref_bc"}, copy=False
    )
    joined = cu_df.merge(ref_df, on=["src", "dst"]).rename(
        columns={"betweenness_centrality": "cu_bc"}, copy=False
    )
    return joined.reset_index(drop=True)
# =============================================================================
def compare_scores(sorted_df, first_key, second_key, epsilon=DEFAULT_EPSILON):
    """Assert the two score columns of ``sorted_df`` agree within ``epsilon``."""
    close = cupy.isclose(sorted_df[first_key], sorted_df[second_key], rtol=epsilon)
    mismatches = sorted_df[~close]
    num_errors = len(mismatches)
    if num_errors > 0:
        # Show the offending rows before failing, to aid debugging.
        print(mismatches)
    assert (
        num_errors == 0
    ), "Mismatch were found when comparing '{}' and '{}' (rtol = {})".format(
        first_key, second_key, epsilon
    )
def generate_nx_result(nx_res_dict, directed):
    """Convert a networkx edge-betweenness dict into a sorted cudf.DataFrame."""
    frame = generate_dataframe_from_nx_dict(nx_res_dict)
    if not directed:
        # Undirected edges must use the same (src, dst) ordering as cugraph.
        frame = generate_upper_triangle(frame)
    return frame.sort_values(["src", "dst"]).reset_index(drop=True)
def generate_dataframe_from_nx_dict(nx_dict):
    """Flatten a {(src, dst): score} dict into a cudf.DataFrame, sorted by edge."""
    edges, scores = zip(*sorted(nx_dict.items()))
    sources, destinations = zip(*edges)
    return cudf.DataFrame(
        {"src": sources, "dst": destinations, "betweenness_centrality": scores}
    )
def generate_upper_triangle(dataframe):
    """Swap (src, dst) on lower-triangle rows (src >= dst) in place.

    Used for undirected graphs so every networkx edge is keyed with the
    same vertex ordering that cugraph reports before merging on it.
    """
    lower_triangle = dataframe["src"] >= dataframe["dst"]
    # NOTE(review): this is chained indexing -- `dataframe[["src", "dst"]]`
    # may return a copy, in which case the masked assignment below would not
    # write back into `dataframe`. Verify against the cudf version in use;
    # an unambiguous rewrite would assign the swapped columns explicitly.
    dataframe[["src", "dst"]][lower_triangle] = dataframe[["dst", "src"]][
        lower_triangle
    ]
    return dataframe
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_edge_betweenness_centrality(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    result_dtype,
    edgevals,
):
    """cugraph edge betweenness must match networkx on small datasets."""
    scores = calc_edge_betweenness_centrality(
        graph_file,
        k=subset_size,
        directed=directed,
        normalized=normalized,
        weight=weight,
        seed=42,
        result_dtype=result_dtype,
        edgevals=edgevals,
    )
    compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", [None])
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("use_k_full", [True])
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_edge_betweenness_centrality_k_full(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    result_dtype,
    use_k_full,
    edgevals,
):
    """Full edge betweenness via k = G.number_of_vertices() instead of
    k=None; verifies that k scales properly."""
    scores = calc_edge_betweenness_centrality(
        graph_file,
        k=subset_size,
        directed=directed,
        normalized=normalized,
        weight=weight,
        seed=42,
        result_dtype=result_dtype,
        use_k_full=use_k_full,
        edgevals=edgevals,
    )
    compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
# NOTE: This test should only be execute on unrenumbered datasets
# the function operating the comparison inside is first proceeding
# to a random sampling over the number of vertices (thus direct offsets)
# in the graph structure instead of actual vertices identifiers
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [karate_disjoint])
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_edge_betweenness_centrality_fixed_sample(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    result_dtype,
    edgevals,
):
    """Approximate edge betweenness from a fixed subset of k sources.

    seed=None exercises the self-consistency path (_calc_bc_subset_fixed).
    """
    scores = calc_edge_betweenness_centrality(
        graph_file,
        k=subset_size,
        directed=directed,
        normalized=normalized,
        weight=weight,
        seed=None,
        result_dtype=result_dtype,
        edgevals=edgevals,
    )
    compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [[]])
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_edge_betweenness_centrality_weight_except(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    result_dtype,
    edgevals,
):
    """Passing a weight parameter must raise NotImplementedError.

    As of 05/28/2020, weighted edge betweenness is not supported.
    """
    with pytest.raises(NotImplementedError):
        scores = calc_edge_betweenness_centrality(
            graph_file,
            k=subset_size,
            directed=directed,
            normalized=normalized,
            weight=weight,
            seed=42,
            result_dtype=result_dtype,
            edgevals=edgevals,
        )
        compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("result_dtype", [str])
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
def test_edge_betweenness_invalid_dtype(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    result_dtype,
    edgevals,
):
    """An invalid result_dtype (str) must raise TypeError."""
    with pytest.raises(TypeError):
        scores = calc_edge_betweenness_centrality(
            graph_file,
            k=subset_size,
            directed=directed,
            normalized=normalized,
            weight=weight,
            seed=42,
            result_dtype=result_dtype,
            edgevals=edgevals,
        )
        compare_scores(scores, first_key="cu_bc", second_key="ref_bc")
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", SMALL_DATASETS)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("edgevals", WEIGHTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
def test_edge_betweenness_centrality_nx(graph_file, directed, edgevals, normalized):
    """cugraph's networkx-compatible code path must agree with networkx."""
    dataset_path = graph_file.get_path()
    Gnx = utils.generate_nx_graph_from_file(dataset_path, directed, edgevals)
    assert nx.is_directed(Gnx) == directed
    nx_bc = nx.edge_betweenness_centrality(Gnx, normalized=normalized)
    cu_bc = cugraph.edge_betweenness_centrality(Gnx, normalized=normalized)
    # Align both result sets by edge key, then count score mismatches.
    networkx_bc = sorted(nx_bc.items(), key=lambda x: x[0])
    cugraph_bc = sorted(cu_bc.items(), key=lambda x: x[0])
    assert len(networkx_bc) == len(cugraph_bc)
    mismatches = 0
    for (cu_edge, cu_score), (nx_edge, nx_score) in zip(cugraph_bc, networkx_bc):
        if abs(cu_score - nx_score) > 0.01 and cu_edge == nx_edge:
            mismatches += 1
            print(
                "type c_bc = ",
                type(cu_score),
                " type nx_bc = ",
                type(nx_score),
            )
    print("Mismatches:", mismatches)
    # Tolerate up to 1% mismatching edges.
    assert mismatches < (0.01 * len(cugraph_bc))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_batch_betweenness_centrality_mg.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.datasets import karate
from test_betweenness_centrality import (
calc_betweenness_centrality,
compare_scores,
)
# Whether the graphs under test are directed.
DIRECTED_GRAPH_OPTIONS = [False, True]
# Whether edge weights are attached to the graph at construction time.
WEIGHTED_GRAPH_OPTIONS = [False, True]
# Whether source/target endpoints are counted in the scores.
ENDPOINTS_OPTIONS = [False, True]
# Whether betweenness scores are normalized.
NORMALIZED_OPTIONS = [False, True]
# Relative tolerance used when comparing against the reference scores.
DEFAULT_EPSILON = 0.0001
# 4 samples a subset of sources; None uses every source.
SUBSET_SIZE_OPTIONS = [4, None]
SUBSET_SEED_OPTIONS = [42]
# =============================================================================
# Parameters
# =============================================================================
DATASETS = [karate]
# FIXME: The "preset_gpu_count" from 21.08 and below are currently not
# supported and have been removed
RESULT_DTYPE_OPTIONS = [np.float64]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force a garbage-collection pass before each test to reclaim memory."""
    gc.collect()
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize(
    "graph_file", DATASETS, ids=[f"dataset={d.get_path().stem}" for d in DATASETS]
)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("weight", [None])
@pytest.mark.parametrize("endpoints", ENDPOINTS_OPTIONS)
@pytest.mark.parametrize("subset_seed", SUBSET_SEED_OPTIONS)
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
def test_mg_betweenness_centrality(
    graph_file,
    directed,
    subset_size,
    normalized,
    weight,
    endpoints,
    subset_seed,
    result_dtype,
    dask_client,
):
    """Batch (multi-GPU) betweenness centrality must match the reference."""
    scores = calc_betweenness_centrality(
        graph_file,
        k=subset_size,
        directed=directed,
        normalized=normalized,
        weight=weight,
        endpoints=endpoints,
        seed=subset_seed,
        result_dtype=result_dtype,
        multi_gpu_batch=True,
    )
    compare_scores(
        scores,
        first_key="cu_bc",
        second_key="ref_bc",
        epsilon=DEFAULT_EPSILON,
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_degree_centrality_mg.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
import dask_cudf
import cugraph
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
from cudf.testing import assert_series_equal
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force a garbage-collection pass before each test to reclaim memory."""
    gc.collect()
# Whether the graphs under test are directed.
IS_DIRECTED = [True, False]
# Edge-list CSV datasets exercised by the degree comparison below.
DATA_PATH = [
    (RAPIDS_DATASET_ROOT_DIR_PATH / "karate-asymmetric.csv").as_posix(),
    (RAPIDS_DATASET_ROOT_DIR_PATH / "polbooks.csv").as_posix(),
    (RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv").as_posix(),
]
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("directed", IS_DIRECTED)
@pytest.mark.parametrize("data_file", DATA_PATH)
def test_dask_mg_degree(dask_client, directed, data_file):
    """MG (dask) in/out/total degrees must match the single-GPU results."""
    input_data_path = data_file
    chunksize = cugraph.dask.get_chunksize(input_data_path)
    # Same edge list loaded twice: distributed for the MG graph, local for
    # the single-GPU reference graph.
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst")
    dg.compute_renumber_edge_list()
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst")
    # Compare each degree flavor by joining MG and SG results on vertex id.
    degree_pairs = (
        (dg.in_degree, g.in_degree),
        (dg.out_degree, g.out_degree),
        (dg.degree, g.degree),
    )
    for mg_degree, sg_degree in degree_pairs:
        joined = (
            mg_degree()
            .merge(sg_degree(), on="vertex", suffixes=["_dg", "_g"])
            .compute()
        )
        assert_series_equal(
            joined["degree_dg"],
            joined["degree_g"],
            check_names=False,
            check_dtype=False,
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/centrality/test_batch_edge_betweenness_centrality_mg.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.datasets import karate, netscience
# Get parameters from standard betwenness_centrality_test
# As tests directory is not a module, we need to add it to the path
# FIXME: Test must be reworked to import from 'cugraph.testing' instead of
# importing from other tests
from test_edge_betweenness_centrality import (
DIRECTED_GRAPH_OPTIONS,
NORMALIZED_OPTIONS,
DEFAULT_EPSILON,
SUBSET_SIZE_OPTIONS,
)
from test_edge_betweenness_centrality import (
calc_edge_betweenness_centrality,
compare_scores,
)
# =============================================================================
# Parameters
# =============================================================================
# Datasets exercised by the MG edge betweenness comparison below.
DATASETS = [karate, netscience]
# FIXME: The "preset_gpu_count" from 21.08 and below are not supported and have
# been removed
RESULT_DTYPE_OPTIONS = [np.float32, np.float64]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Force a garbage-collection pass before each test to reclaim memory."""
    gc.collect()
# FIXME: Fails for directed = False(bc score twice as much) and normalized = True.
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize(
    "graph_file", DATASETS, ids=[f"dataset={d.get_path().stem}" for d in DATASETS]
)
@pytest.mark.parametrize("directed", DIRECTED_GRAPH_OPTIONS)
@pytest.mark.parametrize("subset_size", SUBSET_SIZE_OPTIONS)
@pytest.mark.parametrize("normalized", NORMALIZED_OPTIONS)
@pytest.mark.parametrize("result_dtype", RESULT_DTYPE_OPTIONS)
def test_mg_edge_betweenness_centrality(
    graph_file,
    directed,
    subset_size,
    normalized,
    result_dtype,
    dask_client,
):
    """Batch (multi-GPU) edge betweenness must match the reference scores."""
    scores = calc_edge_betweenness_centrality(
        graph_file,
        k=subset_size,
        directed=directed,
        normalized=normalized,
        weight=None,
        seed=42,
        result_dtype=result_dtype,
        multi_gpu_batch=True,
    )
    compare_scores(
        scores,
        first_key="cu_bc",
        second_key="ref_bc",
        epsilon=DEFAULT_EPSILON,
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/docs/test_doctests.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import doctest
import inspect
import io
import os
import numpy as np
import pandas as pd
import scipy
import pytest
import cugraph
import pylibcugraph
import cudf
from numba import cuda
from cugraph.testing import utils
# Submodule names that must not be walked when collecting doctests.
modules_to_skip = ["dask", "proto", "raft"]
# Root directory containing the datasets referenced by doctest examples.
datasets = utils.RAPIDS_DATASET_ROOT_DIR_PATH
# Runtime CUDA version as "major.minor", used to match skip notes in docstrings.
cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()])
def _is_public_name(name):
    """Return True when ``name`` does not start with an underscore."""
    return not name.startswith("_")
def _is_python_module(member):
    """Return True when ``member`` was loaded from a ``.py`` file."""
    member_file = getattr(member, "__file__", "")
    return os.path.splitext(member_file)[1] == ".py"
def _module_from_library(member, libname):
    """Return True when ``member``'s source file path contains ``libname``."""
    return libname in getattr(member, "__file__", "")
def _file_from_library(member, libname):
    """Return True when ``member``'s source file path contains ``libname``.

    Identical check to ``_module_from_library``; delegate to it so the two
    predicates cannot drift apart.
    """
    return _module_from_library(member, libname)
def _find_modules_in_obj(finder, obj, obj_name, criteria=None):
    """Yield doctests from the (public) submodules of ``obj``.

    Recurses into each module member via ``_find_doctests_in_obj``,
    skipping the submodules listed in ``modules_to_skip``.
    """
    for name, member in inspect.getmembers(obj):
        if criteria is not None and not criteria(name):
            continue
        # ``modules_to_skip`` holds module *names* (strings). The previous
        # code compared the module object itself against it, which never
        # matched, so the skip list was silently ineffective here.
        if inspect.ismodule(member) and name not in modules_to_skip:
            yield from _find_doctests_in_obj(finder, member, obj_name, _is_public_name)
def _find_doctests_in_obj(finder, obj, obj_name, criteria=None):
    """Find all doctests in a module or class.

    Parameters
    ----------
    finder : doctest.DocTestFinder
        The DocTestFinder object to use.
    obj : module or class
        The object to search for docstring examples.
    obj_name : string
        Library name that a member's file path must contain to be searched.
    criteria : callable, optional
        Predicate on the member name; members failing it are skipped.

    Yields
    ------
    doctest.DocTest
        The next doctest found in the object.
    """
    for name, member in inspect.getmembers(obj):
        if criteria is not None and not criteria(name):
            continue
        if inspect.ismodule(member):
            if _file_from_library(member, obj_name) and _is_python_module(member):
                # This is a generator function: the previous code called it
                # without ``yield from``, which only created a generator and
                # discarded it, so doctests in nested submodules were never
                # collected (the MG variant already uses ``yield from``).
                yield from _find_doctests_in_obj(finder, member, obj_name, criteria)
        if inspect.isfunction(member):
            yield from _find_doctests_in_docstring(finder, member)
        if inspect.isclass(member):
            if member.__module__ and _module_from_library(member, obj_name):
                yield from _find_doctests_in_docstring(finder, member)
def _find_doctests_in_docstring(finder, member):
    """Yield ``member``'s doctests that have examples, filtering dask and
    EXPERIMENTAL APIs (except PropertyGraph)."""
    for docstring in finder.find(member):
        text = str(docstring)
        is_dask = "dask" in text
        # FIXME: when PropertyGraph is removed from EXPERIMENTAL
        # manually including PropertyGraph until it is removed from EXPERIMENTAL
        is_pg = "PropertyGraph" in text
        is_experimental = "EXPERIMENTAL" in text and not is_pg
        if docstring.examples and not is_dask and not is_experimental:
            yield docstring
def _fetch_doctests():
    """Yield every runnable doctest from the public cugraph and pylibcugraph APIs."""
    finder = doctest.DocTestFinder()
    yield from _find_modules_in_obj(finder, cugraph, "cugraph", _is_public_name)
    yield from _find_modules_in_obj(
        finder, pylibcugraph, "pylibcugraph", _is_public_name
    )
def skip_docstring(docstring_obj):
    """
    Returns a string indicating why the doctest example string should not be
    tested, or None if it should be tested. This string can be used as the
    "reason" arg to pytest.skip().

    A reason string is returned when the docstring contains a line with the
    text "currently not available on CUDA <version> systems", where <version>
    is a major.minor version string (such as 11.4) matching the CUDA version
    of the system running the test. For example, on a CUDA 11.4 system this
    docstring line triggers a skip:

        NOTE: this function is currently not available on CUDA 11.4 systems.
    """
    unsupported_marker = (
        f"currently not available on CUDA {cuda_version_string} systems"
    )
    for line in docstring_obj.docstring.splitlines():
        if unsupported_marker in line:
            return f"docstring example not supported on CUDA {cuda_version_string}"
    return None
class TestDoctests:
    """Runs every collected cugraph/pylibcugraph docstring example as a doctest."""

    # Absolute path so examples keep working after the chdir to tmp_path.
    abs_datasets_path = datasets.absolute()

    @pytest.fixture(autouse=True)
    def chdir_to_tmp_path(cls, tmp_path):
        # Run each doctest from a scratch cwd so examples that write files do
        # not pollute the repo; always restore the original cwd afterwards.
        original_directory = os.getcwd()
        try:
            os.chdir(tmp_path)
            yield
        finally:
            os.chdir(original_directory)

    @pytest.mark.sg
    @pytest.mark.parametrize(
        "docstring", _fetch_doctests(), ids=lambda docstring: docstring.name
    )
    def test_docstring(self, docstring):
        # We ignore differences in whitespace in the doctest output, and enable
        # the use of an ellipsis "..." to match any string in the doctest
        # output. An ellipsis is useful for, e.g., memory addresses or
        # imprecise floating point values.
        skip_reason = skip_docstring(docstring)
        if skip_reason is not None:
            pytest.skip(reason=skip_reason)
        optionflags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
        runner = doctest.DocTestRunner(optionflags=optionflags)
        np.random.seed(6)
        # Names made available to every docstring example.
        globs = dict(
            cudf=cudf,
            np=np,
            cugraph=cugraph,
            datasets_path=self.abs_datasets_path,
            scipy=scipy,
            pd=pd,
        )
        docstring.globs = globs
        # Capture stdout and include failing outputs in the traceback.
        doctest_stdout = io.StringIO()
        with contextlib.redirect_stdout(doctest_stdout):
            runner.run(docstring)
        results = runner.summarize()
        try:
            assert not results.failed, (
                f"{results.failed} of {results.attempted} doctests failed for "
                f"{docstring.name}:\n{doctest_stdout.getvalue()}"
            )
        except AssertionError:
            # If some failed but all the failures were due to lack of
            # cugraph-ops support, we can skip.
            out = doctest_stdout.getvalue()
            if ("CUGRAPH_UNKNOWN_ERROR" in out and "unimplemented" in out) or (
                "built with NO_CUGRAPH_OPS" in out
            ):
                pytest.skip("Doctest requires cugraph-ops support.")
            raise
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/docs/test_doctests_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import doctest
import inspect
import io
import os
import numpy as np
import pandas as pd
import scipy
import pytest
import cugraph
import cudf
from cugraph.testing import utils
# Root directory containing the datasets referenced by doctest examples.
datasets = utils.RAPIDS_DATASET_ROOT_DIR_PATH
def _is_public_name(name):
    """Return True when ``name`` does not start with an underscore."""
    return not name.startswith("_")
def _is_python_module(member):
    """Return True when ``member`` was loaded from a ``.py`` file."""
    member_file = getattr(member, "__file__", "")
    return os.path.splitext(member_file)[1] == ".py"
def _module_from_library(member, libname):
    """Return True when ``member``'s source file path contains ``libname``."""
    return libname in getattr(member, "__file__", "")
def _find_doctests_in_docstring(finder, member):
    """Yield the doctests of ``member`` that actually contain examples."""
    for docstring in finder.find(member):
        if docstring.examples:
            yield docstring
def _find_doctests_in_obj(finder, obj, obj_name, criteria=None):
    """Find all doctests in a module or class.

    Parameters
    ----------
    finder : doctest.DocTestFinder
        The DocTestFinder object to use.
    obj : module or class
        The object to search for docstring examples.
    obj_name : string
        Used for ensuring a module is part of the object.
        To be passed into _module_from_library.
    criteria : callable, optional

    Yields
    ------
    doctest.DocTest
        The next doctest found in the object.
    """
    # NOTE(review): the ``inspect.isfunction`` predicate below restricts the
    # members to plain functions, which makes the ``ismodule`` and ``isclass``
    # branches unreachable -- confirm whether the predicate should be dropped
    # (the SG variant of this helper in test_doctests.py walks all members).
    for name, member in inspect.getmembers(obj, inspect.isfunction):
        if criteria is not None and not criteria(name):
            continue
        if inspect.ismodule(member):
            yield from _find_doctests_in_obj(finder, member, obj_name, criteria)
        if inspect.isfunction(member):
            yield from _find_doctests_in_docstring(finder, member)
        if inspect.isclass(member):
            if _module_from_library(member, obj_name):
                yield from _find_doctests_in_docstring(finder, member)
def _fetch_doctests():
    """Yield every runnable doctest from the public ``cugraph.dask`` API."""
    finder = doctest.DocTestFinder()
    yield from _find_doctests_in_obj(finder, cugraph.dask, "dask", _is_public_name)
@pytest.fixture(
    scope="module", params=_fetch_doctests(), ids=lambda docstring: docstring.name
)
def docstring(request):
    # One fixture instance per collected doctest; test_docstring runs it.
    return request.param
class TestDoctests:
    """Runs each collected ``cugraph.dask`` docstring example as a doctest."""

    # Absolute path so examples keep working after the chdir to tmp_path.
    abs_datasets_path = datasets.absolute()

    @pytest.fixture(autouse=True)
    def chdir_to_tmp_path(cls, tmp_path):
        # Run each doctest from a scratch cwd so examples that write files do
        # not pollute the repo; always restore the original cwd afterwards.
        original_directory = os.getcwd()
        try:
            os.chdir(tmp_path)
            yield
        finally:
            os.chdir(original_directory)

    @pytest.mark.mg
    def test_docstring(self, dask_client, docstring):
        # We ignore differences in whitespace in the doctest output, and enable
        # the use of an ellipsis "..." to match any string in the doctest
        # output. An ellipsis is useful for, e.g., memory addresses or
        # imprecise floating point values.
        optionflags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
        runner = doctest.DocTestRunner(optionflags=optionflags)
        np.random.seed(6)
        # Names made available to every docstring example.
        globs = dict(
            cudf=cudf,
            np=np,
            cugraph=cugraph,
            datasets_path=self.abs_datasets_path,
            scipy=scipy,
            pd=pd,
        )
        docstring.globs = globs
        # Capture stdout and include failing outputs in the traceback.
        doctest_stdout = io.StringIO()
        with contextlib.redirect_stdout(doctest_stdout):
            runner.run(docstring)
        results = runner.summarize()
        assert not results.failed, (
            f"{results.failed} of {results.attempted} doctests failed for "
            f"{docstring.name}:\n{doctest_stdout.getvalue()}"
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_sssp_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cudf
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: collect garbage so prior tests' GPU objects are freed."""
    gc.collect()
IS_DIRECTED = [True, False]
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_sssp(dask_client, directed):
    """
    Compare MG (dask) SSSP distances against the single-GPU result.

    Reads the same CSV edge list into a dask_cudf and a cudf DataFrame,
    builds a graph from each, runs SSSP from vertex 0 on both, and
    asserts the per-vertex distances match exactly.
    """
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "netscience.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # Single-GPU reference graph.
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst", "value", renumber=True)
    # Multi-GPU graph built from the distributed edge list.
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", "value")
    expected_dist = cugraph.sssp(g, 0)
    print(expected_dist)
    result_dist = dcg.sssp(dg, 0)
    result_dist = result_dist.compute()
    compare_dist = expected_dist.merge(
        result_dist, on="vertex", suffixes=["_local", "_dask"]
    )
    # Vectorized mismatch count instead of a per-row .iloc loop: the loop
    # incurred one host round-trip per row on a cudf DataFrame.
    err = int(
        (compare_dist["distance_local"] != compare_dist["distance_dask"]).sum()
    )
    assert err == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_bfs_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cudf
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: collect garbage so prior tests' GPU objects are freed."""
    gc.collect()
IS_DIRECTED = [True, False]
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_bfs(dask_client, directed):
    """
    Compare MG (dask) BFS distances against the single-GPU result.

    The dataset is augmented with a copy of itself shifted by 1000 vertex
    ids, producing a second component, and BFS is run from one seed in
    each component ([0, 1000]).
    """
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "netscience.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    def modify_dataset(df):
        # Append a copy of the edge list with all vertex ids shifted by 1000.
        temp_df = cudf.DataFrame()
        temp_df["src"] = df["src"] + 1000
        temp_df["dst"] = df["dst"] + 1000
        temp_df["value"] = df["value"]
        return cudf.concat([df, temp_df])

    meta = ddf._meta
    # Random token keeps dask from reusing cached task results between runs.
    ddf = ddf.map_partitions(
        modify_dataset, meta=meta, token="custom-" + str(random.random())
    )
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    df = modify_dataset(df)
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst")
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst")
    expected_dist = cugraph.bfs(g, [0, 1000])
    result_dist = dcg.bfs(dg, [0, 1000])
    result_dist = result_dist.compute()
    compare_dist = expected_dist.merge(
        result_dist, on="vertex", suffixes=["_local", "_dask"]
    )
    # Vectorized mismatch count instead of a per-row .iloc loop: the loop
    # incurred one host round-trip per row on a cudf DataFrame.
    err = int(
        (compare_dist["distance_local"] != compare_dist["distance_dask"]).sum()
    )
    assert err == 0
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_bfs_invalid_start(dask_client, directed):
    """MG BFS must raise ValueError for a start vertex absent from the graph
    and warn when the start values' dtype differs from the vertex dtype."""
    source_vertex = 10
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "netscience.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    el = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # Remap every occurrence of source_vertex to a brand-new id so the
    # original id is guaranteed not to exist in the graph.
    newval = max(el.src.max().compute(), el.dst.max().compute()) + 1
    el.src = el.src.replace(source_vertex, newval)
    el.dst = el.dst.replace(source_vertex, newval)
    G = cugraph.Graph(directed=directed)
    G.from_dask_cudf_edgelist(el, "src", "dst")
    with pytest.raises(ValueError):
        dcg.bfs(G, source_vertex).compute()
    # invalid dtype (the default cudf.Series() dtype is int64)
    source_vertex = cudf.Series([0, 1])
    warning_msg = "The 'start' values dtype must match " "the graph's vertices dtype."
    with pytest.warns(UserWarning, match=warning_msg):
        dcg.bfs(G, source_vertex).compute()
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
@pytest.mark.parametrize("directed", IS_DIRECTED)
def test_dask_mg_bfs_multi_column_depthlimit(dask_client, directed):
    """
    Compare MG BFS against single-GPU BFS on a multi-column-vertex graph
    with the same depth limit applied to both.
    """
    gc.collect()
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "netscience.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src_a", "dst_a", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # Second vertex-id column is the first shifted by 1000.
    ddf["src_b"] = ddf["src_a"] + 1000
    ddf["dst_b"] = ddf["dst_a"] + 1000
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src_a", "dst_a", "value"],
        dtype=["int32", "int32", "float32"],
    )
    df["src_b"] = df["src_a"] + 1000
    df["dst_b"] = df["dst_a"] + 1000
    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, ["src_a", "src_b"], ["dst_a", "dst_b"])
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, ["src_a", "src_b"], ["dst_a", "dst_b"])
    # Multi-column start vertex is provided as a single-row DataFrame.
    start = cudf.DataFrame()
    start["a"] = [0]
    start["b"] = [1000]
    depth_limit = 18
    expected_dist = cugraph.bfs(g, start, depth_limit=depth_limit)
    result_dist = dcg.bfs(dg, start, depth_limit=depth_limit)
    result_dist = result_dist.compute()
    compare_dist = expected_dist.merge(
        result_dist, on=["0_vertex", "1_vertex"], suffixes=["_local", "_dask"]
    )
    # Vectorized version of the original per-row .iloc loop: count rows
    # where both results are within the depth limit yet disagree (the same
    # condition the loop applied, without per-row host round-trips).
    local = compare_dist["distance_local"]
    dask = compare_dist["distance_dask"]
    err = int(((local <= depth_limit) & (dask <= depth_limit) & (local != dask)).sum())
    assert err == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_filter_unreachable.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import time
import pytest
import numpy as np
import networkx as nx
import cugraph
from cugraph.testing import DEFAULT_DATASETS
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: collect garbage and log the networkx version."""
    gc.collect()
    print("Networkx version : {} ".format(nx.__version__))
SOURCES = [1]
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("source", SOURCES)
def test_filter_unreachable(graph_file, source):
    """filter_unreachable must drop every vertex whose SSSP distance is the
    dtype's max-value "unreachable" sentinel, without dropping everything."""
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=True))
    cu_M = G.view_edge_list()
    # NOTE(review): both prints report len(cu_M), i.e. the edge count.
    print("sources size = " + str(len(cu_M)))
    print("destinations size = " + str(len(cu_M)))
    print("cugraph Solving... ")
    t1 = time.time()
    df = cugraph.sssp(G, source)
    t2 = time.time() - t1
    print("Time : " + str(t2))
    reachable_df = cugraph.filter_unreachable(df)
    # Unreachable vertices carry the distance dtype's max value; none may
    # survive the filter.
    if np.issubdtype(df["distance"].dtype, np.integer):
        inf = np.iinfo(reachable_df["distance"].dtype).max
        assert len(reachable_df.query("distance == @inf")) == 0
    elif np.issubdtype(df["distance"].dtype, np.inexact):
        # `inf` is referenced inside the query string, hence the noqa.
        inf = np.finfo(reachable_df["distance"].dtype).max  # noqa: F841
        assert len(reachable_df.query("distance == @inf")) == 0
    assert len(reachable_df) != 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_paths.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from tempfile import NamedTemporaryFile
import math
import numpy as np
import pytest
import cudf
import cupy
import cugraph
from cugraph.testing import get_resultset, load_resultset
from cupyx.scipy.sparse import coo_matrix as cupy_coo_matrix
# Edge list in "src,dst,weight" CSV form: one connected component around
# vertex 1.
CONNECTED_GRAPH = """1,5,3
1,4,1
1,2,1
1,6,2
1,7,2
4,5,1
2,3,1
7,6,2
"""
# Same graph plus an isolated 8-9 edge, i.e. a second component.
DISCONNECTED_GRAPH = CONNECTED_GRAPH + "8,9,4"
# Single value or callable golden results are not added as a Resultset.
# Keys encode "shortest_path_length_<source>_<target>"; an exception class as
# the value means the call is expected to raise; 3.4028235e38 is float32 max,
# the "unreachable" sentinel.
paths_golden_results = {
    "shortest_path_length_1_1": 0,
    "shortest_path_length_1_5": 2.0,
    "shortest_path_length_1_3": 2.0,
    "shortest_path_length_1_6": 2.0,
    "shortest_path_length_-1_1": ValueError,
    "shortest_path_length_1_10": ValueError,
    "shortest_path_length_0_42": ValueError,
    "shortest_path_length_1_8": 3.4028235e38,
}
# Fixture that loads all golden results necessary to run cugraph tests if the
# tests are not already present in the designated results directory. Most of the
# time, this will only check if the module-specific mapping file exists.
@pytest.fixture(scope="module")
def load_traversal_results():
    """Download/verify the stored "traversal" golden resultsets (side effect only)."""
    load_resultset(
        "traversal", "https://data.rapids.ai/cugraph/results/resultsets.tar.gz"
    )
@pytest.fixture
def graphs(request):
    """Yield (cugraph.Graph, cupyx coo_matrix) built from the CSV text in
    request.param (one of the *_GRAPH module constants)."""
    with NamedTemporaryFile(mode="w+", suffix=".csv") as graph_tf:
        graph_tf.writelines(request.param)
        graph_tf.seek(0)
        cudf_df = cudf.read_csv(
            graph_tf.name,
            names=["src", "dst", "data"],
            delimiter=",",
            dtype=["int32", "int32", "float64"],
        )
        cugraph_G = cugraph.Graph()
        cugraph_G.from_cudf_edgelist(
            cudf_df, source="src", destination="dst", edge_attr="data"
        )
        # construct cupy coo_matrix graph
        i = []
        j = []
        weights = []
        for index in range(cudf_df.shape[0]):
            vertex1 = cudf_df.iloc[index]["src"]
            vertex2 = cudf_df.iloc[index]["dst"]
            weight = cudf_df.iloc[index]["data"]
            # Insert each edge in both directions so the matrix is symmetric
            # (undirected graph).
            i += [vertex1, vertex2]
            j += [vertex2, vertex1]
            weights += [weight, weight]
        i = cupy.array(i)
        j = cupy.array(j)
        weights = cupy.array(weights)
        largest_vertex = max(cupy.amax(i), cupy.amax(j))
        cupy_df = cupy_coo_matrix(
            (weights, (i, j)), shape=(largest_vertex + 1, largest_vertex + 1)
        )
        yield cugraph_G, cupy_df
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [CONNECTED_GRAPH], indirect=True)
def test_connected_graph_shortest_path_length(graphs):
    """Shortest-path lengths from vertex 1 agree across cuGraph, the stored
    golden results, and the CuPy sparse-matrix code path."""
    cugraph_G, cupy_df = graphs
    # FIXME: aren't the first two assertions in each batch redundant?
    expected_lengths = {1: 0.0, 5: 2.0, 3: 2.0, 6: 2.0}
    for target, expected in expected_lengths.items():
        length = cugraph.shortest_path_length(cugraph_G, 1, target)
        assert length == expected
        assert length == paths_golden_results[f"shortest_path_length_1_{target}"]
        assert length == cugraph.shortest_path_length(cupy_df, 1, target)
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [CONNECTED_GRAPH], indirect=True)
def test_shortest_path_length_invalid_source(graphs):
    """A nonexistent (negative) source vertex raises ValueError on every path."""
    golden = paths_golden_results["shortest_path_length_-1_1"]
    cugraph_G, cupy_df = graphs
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cugraph_G, -1, 1)
    # The golden entry stores the expected exception class itself.
    if callable(golden):
        with pytest.raises(ValueError):
            raise golden()
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cupy_df, -1, 1)
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [DISCONNECTED_GRAPH], indirect=True)
def test_shortest_path_length_invalid_target(graphs):
    """A target vertex absent from the graph raises ValueError on every path."""
    golden = paths_golden_results["shortest_path_length_1_10"]
    cugraph_G, cupy_df = graphs
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cugraph_G, 1, 10)
    # The golden entry stores the expected exception class itself.
    if callable(golden):
        with pytest.raises(ValueError):
            raise golden()
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cupy_df, 1, 10)
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [CONNECTED_GRAPH], indirect=True)
def test_shortest_path_length_invalid_vertexes(graphs):
    """When both endpoints are invalid, ValueError is raised on every path."""
    golden = paths_golden_results["shortest_path_length_0_42"]
    cugraph_G, cupy_df = graphs
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cugraph_G, 0, 42)
    # The golden entry stores the expected exception class itself.
    if callable(golden):
        with pytest.raises(ValueError):
            raise golden()
    with pytest.raises(ValueError):
        cugraph.shortest_path_length(cupy_df, 0, 42)
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [DISCONNECTED_GRAPH], indirect=True)
def test_shortest_path_length_no_path(graphs):
    """Distance between vertices in different components is the float max
    sentinel rather than an exception."""
    cugraph_G, cupy_df = graphs
    # FIXME: In case there is no path between two vertices, the
    # result can be either the max of float32 or float64
    max_float_32 = (2 - math.pow(2, -23)) * math.pow(2, 127)
    path_1_to_8 = cugraph.shortest_path_length(cugraph_G, 1, 8)
    assert path_1_to_8 == sys.float_info.max
    golden_path_1_to_8 = paths_golden_results["shortest_path_length_1_8"]
    golden_path_1_to_8 = np.float32(golden_path_1_to_8)
    # Golden value may be stored at float32 precision, so accept either.
    assert golden_path_1_to_8 in [
        max_float_32,
        path_1_to_8,
    ]
    assert path_1_to_8 == cugraph.shortest_path_length(cupy_df, 1, 8)
@pytest.mark.sg
@pytest.mark.parametrize("graphs", [DISCONNECTED_GRAPH], indirect=True)
def test_shortest_path_length_no_target(graphs, load_traversal_results):
    """Single-source shortest paths: unreachable vertices (8, 9) carry the
    float-max sentinel; reachable ones must match the stored golden distances."""
    cugraph_G, cupy_df = graphs
    cugraph_path_1_to_all = cugraph.shortest_path_length(cugraph_G, 1)
    golden_path_1_to_all = get_resultset(
        resultset_name="traversal",
        algo="shortest_path_length",
        graph_dataset="DISCONNECTED",
        graph_directed=str(True),
        source="1",
        weight="weight",
    )
    cupy_path_1_to_all = cugraph.shortest_path_length(cupy_df, 1)
    # Cast networkx graph on cugraph vertex column type from str to int.
    # SSSP preserves vertex type, convert for comparison
    assert cugraph_path_1_to_all == cupy_path_1_to_all
    # results for vertex 8 and 9 are not returned
    assert cugraph_path_1_to_all.shape[0] == len(golden_path_1_to_all) + 2
    for index in range(cugraph_path_1_to_all.shape[0]):
        vertex = cugraph_path_1_to_all["vertex"][index].item()
        distance = cugraph_path_1_to_all["distance"][index].item()
        # verify cugraph against networkx
        if vertex in {8, 9}:
            # Networkx does not return distances for these vertexes.
            assert distance == sys.float_info.max
        else:
            assert (
                distance
                == golden_path_1_to_all.loc[
                    golden_path_1_to_all.vertex == vertex
                ].distance.iloc[0]
            )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_bfs.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import cupy as cp
import numpy as np
from scipy.sparse import coo_matrix as sp_coo_matrix
from scipy.sparse import csr_matrix as sp_csr_matrix
from scipy.sparse import csc_matrix as sp_csc_matrix
import cudf
import cugraph
from cupyx.scipy.sparse import coo_matrix as cp_coo_matrix
from cupyx.scipy.sparse import csr_matrix as cp_csr_matrix
from cupyx.scipy.sparse import csc_matrix as cp_csc_matrix
from pylibcugraph.testing.utils import gen_fixture_params_product
from cugraph.testing import (
utils,
get_resultset,
load_resultset,
DEFAULT_DATASETS,
SMALL_DATASETS,
)
# =============================================================================
# Parameters
# =============================================================================
DIRECTED_GRAPH_OPTIONS = [True, False]
SUBSET_SEED_OPTIONS = [42]
# Fixed, known-good BFS start vertex for each dataset name.
DATASET_STARTS = {
    "dolphins": 16,
    "karate": 7,
    "karate-disjoint": 19,
    "netscience": 1237,
}
# Relative tolerance used by compare_single_sp_counter().
DEFAULT_EPSILON = 1e-6
# None means no depth limit.
DEPTH_LIMITS = [None, 1, 5, 18]
# Map of cuGraph input types to the expected output type for cuGraph
# BFS calls (consumed by convert_output_to_cudf below).
cuGraph_input_output_map = {
    cugraph.Graph: cudf.DataFrame,
    cp_coo_matrix: tuple,
    cp_csr_matrix: tuple,
    cp_csc_matrix: tuple,
    sp_coo_matrix: tuple,
    sp_csr_matrix: tuple,
    sp_csc_matrix: tuple,
}
# Input types whose results come back as CuPy (rather than NumPy) arrays.
cupy_types = [cp_coo_matrix, cp_csr_matrix, cp_csc_matrix]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Pytest per-test setup: collect garbage so prior tests' GPU objects are freed."""
    gc.collect()
# =============================================================================
# Helper functions
# =============================================================================
def convert_output_to_cudf(input_G_or_matrix, cugraph_result):
    """
    Convert cugraph_result to a cudf DataFrame. The conversion is based on the
    type of input_G_or_matrix, since different input types result in different
    cugraph_result types (see cuGraph_input_output_map).
    """
    input_type = type(input_G_or_matrix)
    expected_return_type = cuGraph_input_output_map[type(input_G_or_matrix)]
    assert type(cugraph_result) is expected_return_type
    # Graph inputs already yield a cudf.DataFrame -- pass through unchanged.
    if expected_return_type is cudf.DataFrame:
        return cugraph_result
    # A CuPy/SciPy input means the return value will be a 2-tuple of:
    #   distance: cupy.ndarray
    #     ndarray of shortest distances between source and vertex.
    #   predecessor: cupy.ndarray
    #     ndarray of predecessors of a vertex on the path from source, which
    #     can be used to reconstruct the shortest paths.
    # or a 3-tuple of the above 2 plus
    #   sp_counter: cupy.ndarray
    #     for the i'th position in the array, the number of shortest paths
    #     leading to the vertex at position i in the (input) vertex array.
    elif expected_return_type is tuple:
        if input_type in cupy_types:
            assert type(cugraph_result[0]) is cp.ndarray
            assert type(cugraph_result[1]) is cp.ndarray
            if len(cugraph_result) == 3:
                assert type(cugraph_result[2]) is cp.ndarray
        else:
            assert type(cugraph_result[0]) is np.ndarray
            assert type(cugraph_result[1]) is np.ndarray
            if len(cugraph_result) == 3:
                assert type(cugraph_result[2]) is np.ndarray
        # Get unique verts from input since they are not included in output
        if type(input_G_or_matrix) in [
            cp_csr_matrix,
            cp_csc_matrix,
            sp_csr_matrix,
            sp_csc_matrix,
        ]:
            # CSR/CSC don't expose row/col directly; go through COO.
            coo = input_G_or_matrix.tocoo(copy=False)
        else:
            coo = input_G_or_matrix
        verts = sorted(set([n.item() for n in coo.col] + [n.item() for n in coo.row]))
        dists = [n.item() for n in cugraph_result[0]]
        preds = [n.item() for n in cugraph_result[1]]
        assert len(verts) == len(dists) == len(preds)
        d = {"vertex": verts, "distance": dists, "predecessor": preds}
        if len(cugraph_result) == 3:
            counters = [n.item() for n in cugraph_result[2]]
            assert len(counters) == len(verts)
            d.update({"sp_counter": counters})
        return cudf.DataFrame(d)
    else:
        raise RuntimeError(f"unsupported return type: {expected_return_type}")
# NOTE: We need to use relative error, the values of the shortest path
# counters can reach extremely high values 1e+80 and above
def compare_single_sp_counter(result, expected, epsilon=DEFAULT_EPSILON):
    """Return whether *result* is within relative tolerance *epsilon* of *expected*."""
    is_close = np.isclose(result, expected, rtol=epsilon)
    return is_close
def compare_bfs(benchmark_callable, G, golden_values, start_vertex, depth_limit):
    """
    Generate both cugraph and reference bfs traversal.

    start_vertex may be a single int (one benchmarked bfs_edges call) or a
    list of ints (one call per seed, the whole batch benchmarked together);
    each result is verified against golden_values via _compare_bfs.
    """
    if isinstance(start_vertex, int):
        result = benchmark_callable(cugraph.bfs_edges, G, start_vertex)
        cugraph_df = convert_output_to_cudf(G, result)
        compare_func = _compare_bfs
        # NOTE: We need to take 2 different path for verification as the nx
        # functions used as reference return dictionaries that might
        # not contain all the vertices while the cugraph version return
        # a cudf.DataFrame with all the vertices, also some verification
        # become slow with the data transfer
        compare_func(cugraph_df, golden_values, start_vertex)
    elif isinstance(start_vertex, list):  # For other Verifications
        all_golden_values = golden_values
        all_cugraph_distances = []

        def func_to_benchmark():
            # Run BFS once per seed; only the whole batch is benchmarked.
            for sv in start_vertex:
                cugraph_df = cugraph.bfs_edges(G, sv, depth_limit=depth_limit)
                all_cugraph_distances.append(cugraph_df)

        benchmark_callable(func_to_benchmark)
        compare_func = _compare_bfs
        for (i, sv) in enumerate(start_vertex):
            cugraph_df = convert_output_to_cudf(G, all_cugraph_distances[i])
            compare_func(cugraph_df, all_golden_values[i], sv)
    else:  # Unknown type given to seed
        raise NotImplementedError("Invalid type for start_vertex")
def _compare_bfs(cugraph_df, golden_distances, source):
    """
    Verify a cugraph BFS result DataFrame (vertex/distance/predecessor)
    against a {vertex: distance} golden mapping for the given source.
    """
    # This call should only contain 3 columns:
    # 'vertex', 'distance', 'predecessor'
    # It also confirms whether or not 'sp_counter' has been created by the call
    # 'sp_counter' triggers atomic operations in BFS, thus we want to make
    # sure that it was not the case
    # NOTE: 'predecessor' is always returned while the C++ function allows to
    # pass a nullptr
    assert len(cugraph_df.columns) == 3, (
        "The result of the BFS has an invalid " "number of columns"
    )
    cu_distances = {
        vertex: dist
        for vertex, dist in zip(
            cugraph_df["vertex"].to_numpy(), cugraph_df["distance"].to_numpy()
        )
    }
    cu_predecessors = {
        vertex: dist
        for vertex, dist in zip(
            cugraph_df["vertex"].to_numpy(), cugraph_df["predecessor"].to_numpy()
        )
    }
    # FIXME: The following only verifies vertices that were reached
    # by cugraph's BFS.
    # We assume that the distances are given back as integers in BFS
    # max_val = np.iinfo(df['distance'].dtype).max
    # Unreached vertices have a distance of max_val
    missing_vertex_error = 0
    distance_mismatch_error = 0
    invalid_predecessor_error = 0
    for vertex in golden_distances:
        if vertex in cu_distances:
            result = cu_distances[vertex]
            expected = golden_distances[vertex]
            if result != expected:
                print(
                    "[ERR] Mismatch on distances: "
                    "vid = {}, cugraph = {}, golden = {}".format(
                        vertex, result, expected
                    )
                )
                distance_mismatch_error += 1
            if vertex not in cu_predecessors:
                missing_vertex_error += 1
            else:
                pred = cu_predecessors[vertex]
                if vertex != source and pred not in golden_distances:
                    invalid_predecessor_error += 1
                else:
                    # The graph is unweighted thus, predecessors are 1 away
                    if vertex != source and (
                        (golden_distances[pred] + 1 != cu_distances[vertex])
                    ):
                        print(
                            "[ERR] Invalid on predecessors: "
                            "vid = {}, cugraph = {}".format(vertex, pred)
                        )
                        invalid_predecessor_error += 1
        else:
            missing_vertex_error += 1
    assert missing_vertex_error == 0, "There are missing vertices"
    assert distance_mismatch_error == 0, "There are invalid distances"
    assert invalid_predecessor_error == 0, "There are invalid predecessors"
def get_cu_graph_and_params(dataset, directed):
    """
    Helper for fixtures returning a cuGraph obj and params.

    Returns (Graph, dataset path, dataset name, directed flag).
    """
    graph_obj = dataset.get_graph(create_using=cugraph.Graph(directed=directed))
    return (
        graph_obj,
        dataset.get_path(),
        dataset.metadata["name"],
        directed,
    )
def get_cu_graph_golden_results_and_params(
    depth_limit, G, dataset_path, dataset_name, directed, _
):
    """
    Helper for fixtures returning golden results and params.

    Looks up the stored single_source_shortest_path_length resultset for
    (dataset, directedness, start vertex, depth limit) and returns it as a
    plain {vertex: distance} dict alongside the graph and params. The
    trailing `_` arg is the load_traversal_results fixture, used only for
    its side effect.
    """
    # Each dataset has a fixed, known-good start vertex (see DATASET_STARTS).
    start_vertex = DATASET_STARTS[dataset_name]
    golden_values = get_resultset(
        resultset_name="traversal",
        algo="single_source_shortest_path_length",
        cutoff=str(depth_limit),
        graph_dataset=dataset_name,
        graph_directed=str(directed),
        start_vertex=str(start_vertex),
    )
    golden_values = cudf.Series(
        golden_values.distance.values, index=golden_values.vertex
    ).to_dict()
    return (G, dataset_path, directed, golden_values, start_vertex, depth_limit)
# =============================================================================
# Pytest Fixtures
# =============================================================================
# Wrap raw option values as pytest params for use in fixture param lists.
SEEDS = [pytest.param(s) for s in SUBSET_SEED_OPTIONS]
DIRECTED = [pytest.param(d) for d in DIRECTED_GRAPH_OPTIONS]
DATASETS = [pytest.param(d) for d in DEFAULT_DATASETS]
SMALL_DATASETS = [pytest.param(d) for d in SMALL_DATASETS]
DEPTH_LIMIT = [pytest.param(d) for d in DEPTH_LIMITS]
# Call gen_fixture_params_product() to calculate the cartesian product of
# multiple lists of params. This is required since parameterized fixtures do
# not do this automatically (unlike multiply-parameterized tests). The 2nd
# item in the tuple is a label for the param value used when displaying the
# full test name.
algo_test_fixture_params = gen_fixture_params_product((DEPTH_LIMIT, "depth_limit"))
graph_fixture_params = gen_fixture_params_product(
    (DATASETS, "ds"), (DIRECTED, "dirctd")
)
small_graph_fixture_params = gen_fixture_params_product(
    (SMALL_DATASETS, "ds"), (DIRECTED, "dirctd")
)
# The single param list variants are used when only 1 param combination is
# needed (eg. testing non-native input types where tests for other combinations
# was covered elsewhere).
single_algo_test_fixture_params = gen_fixture_params_product(
    ([DEPTH_LIMIT[0]], "depth_limit")
)
single_small_graph_fixture_params = gen_fixture_params_product(
    ([SMALL_DATASETS[0]], "ds"), (DIRECTED, "dirctd")
)
# Fixture that loads all golden results necessary to run cugraph tests if the
# tests are not already present in the designated results directory. Most of the
# time, this will only check if the module-specific mapping file exists.
@pytest.fixture(scope="module")
def load_traversal_results():
    """Download/verify the stored "traversal" golden resultsets (side effect only)."""
    load_resultset(
        "traversal", "https://data.rapids.ai/cugraph/results/resultsets.tar.gz"
    )
# Fixtures that result in a test-per (dataset X directed/undirected)
# combination. These return the path to the dataset, a bool indicating if a
# directed graph is being used, and the Nx graph object.
@pytest.fixture(scope="module", params=graph_fixture_params)
def dataset_golden_results(request):
    """Graph + params for every (default dataset x directedness) combination."""
    return get_cu_graph_and_params(*request.param)
@pytest.fixture(scope="module", params=small_graph_fixture_params)
def small_dataset_golden_results(request):
    """Graph + params for every (small dataset x directedness) combination."""
    return get_cu_graph_and_params(*request.param)
@pytest.fixture(scope="module", params=single_small_graph_fixture_params)
def single_small_dataset_golden_results(request):
    """Graph + params for a single small dataset (both directedness values)."""
    return get_cu_graph_and_params(*request.param)
# Fixtures that result in a test-per (dataset_nx_graph combinations X algo_test
# param combinations) combination. These run Nx BFS on the Nx graph obj and
# return the path to the dataset, if a directed graph is being used, the Nx BFS
# results, the starting vertex for BFS, and a flag if shortest path counting was
# used.
@pytest.fixture(scope="module", params=algo_test_fixture_params)
def dataset_goldenresults_startvertex_spc(
    dataset_golden_results, load_traversal_results, request
):
    """(G, path, directed, golden distances, start vertex, depth limit) per combo."""
    return get_cu_graph_golden_results_and_params(
        *request.param, *dataset_golden_results, load_traversal_results
    )
@pytest.fixture(scope="module", params=single_algo_test_fixture_params)
def single_dataset_goldenresults_startvertex_spc(
    single_small_dataset_golden_results, load_traversal_results, request
):
    """(G, path, directed, golden distances, start vertex, depth limit) for the
    single small-dataset combination."""
    return get_cu_graph_golden_results_and_params(
        *request.param, *single_small_dataset_golden_results, load_traversal_results
    )
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.CUGRAPH_INPUT_TYPES)
def test_bfs(gpubenchmark, dataset_goldenresults_startvertex_spc, cugraph_input_type):
    """
    Test BFS traversal on random source with distance and predecessors
    """
    (
        G,
        dataset,
        directed,
        golden_values,
        start_vertex,
        depth_limit,
    ) = dataset_goldenresults_startvertex_spc
    # For directed graphs, swap the default (undirected) Graph marker used as
    # the input type for a directed one.
    if directed:
        if isinstance(cugraph_input_type, cugraph.Graph):
            cugraph_input_type = cugraph.Graph(directed=True)
    # Non-Graph input types (CuPy/SciPy matrices) are rebuilt from the CSV.
    if not isinstance(cugraph_input_type, cugraph.Graph):
        G_or_matrix = utils.create_obj_from_csv(dataset, cugraph_input_type)
    else:
        G_or_matrix = G
    compare_bfs(gpubenchmark, G_or_matrix, golden_values, start_vertex, depth_limit)
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.MATRIX_INPUT_TYPES)
def test_bfs_nonnative_inputs_matrix(
    gpubenchmark, single_dataset_goldenresults_startvertex_spc, cugraph_input_type
):
    # Delegates to test_bfs, restricted to matrix (CuPy/SciPy) input types.
    test_bfs(
        gpubenchmark, single_dataset_goldenresults_startvertex_spc, cugraph_input_type
    )
@pytest.mark.sg
def test_bfs_nonnative_inputs_nx(
    single_dataset_goldenresults_startvertex_spc,
):
    """Verify stored bfs_edges results against the golden distance mapping."""
    (
        _,
        _,
        directed,
        golden_values,
        start_vertex,
        _,
    ) = single_dataset_goldenresults_startvertex_spc
    # Stored result of a prior cugraph.bfs_edges run on the karate dataset.
    cugraph_df = get_resultset(
        resultset_name="traversal",
        algo="bfs_edges",
        graph_dataset="karate",
        graph_directed=str(directed),
        source=str(start_vertex),
    )
    compare_func = _compare_bfs
    compare_func(cugraph_df, golden_values, start_vertex)
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.CUGRAPH_INPUT_TYPES)
def test_bfs_invalid_start(dataset_goldenresults_startvertex_spc, cugraph_input_type):
    """BFS from a vertex that does not exist in the graph raises ValueError."""
    (G, _, _, _, _, depth_limit) = dataset_goldenresults_startvertex_spc
    # One past the largest vertex id is guaranteed not to be in the graph.
    edge_list = G.view_edge_list()
    missing_vertex = max(edge_list.src.max(), edge_list.dst.max()) + 1
    with pytest.raises(ValueError):
        cugraph.bfs(G, missing_vertex, depth_limit=depth_limit)
@pytest.mark.sg
def test_scipy_api_compat():
    """SciPy-style BFS args are valid only for matrix inputs; cugraph-style
    args are required for Graph inputs, and mixing start/i_start is rejected."""
    graph_file = DEFAULT_DATASETS[0]
    dataset_path = graph_file.get_path()
    input_cugraph_graph = graph_file.get_graph(ignore_weights=True)
    input_coo_matrix = utils.create_obj_from_csv(
        dataset_path, cp_coo_matrix, edgevals=True
    )
    # Ensure scipy-only options are rejected for cugraph inputs
    with pytest.raises(TypeError):
        cugraph.bfs(input_cugraph_graph, start=0, directed=False)
    with pytest.raises(TypeError):
        cugraph.bfs(input_cugraph_graph)  # required arg missing
    # Ensure cugraph-compatible options work as expected
    cugraph.bfs(input_cugraph_graph, i_start=0)
    # NOTE(review): the call below duplicates the one above verbatim --
    # possibly a copy/paste leftover; confirm whether a different option
    # combination was intended here.
    cugraph.bfs(input_cugraph_graph, i_start=0)
    # cannot have start and i_start
    with pytest.raises(TypeError):
        cugraph.bfs(input_cugraph_graph, start=0, i_start=0)
    # Ensure SciPy options for matrix inputs work as expected
    cugraph.bfs(input_coo_matrix, i_start=0)
    cugraph.bfs(input_coo_matrix, i_start=0, directed=True)
    cugraph.bfs(input_coo_matrix, i_start=0, directed=False)
    result = cugraph.bfs(input_coo_matrix, i_start=0)
    assert type(result) is tuple
    assert len(result) == 2
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/traversal/test_sssp.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
import pandas as pd
import cudf
import cupyx
import cugraph
import cupy as cp
from cupyx.scipy.sparse import coo_matrix as cp_coo_matrix
from cupyx.scipy.sparse import csr_matrix as cp_csr_matrix
from cupyx.scipy.sparse import csc_matrix as cp_csc_matrix
from scipy.sparse import coo_matrix as sp_coo_matrix
from scipy.sparse import csr_matrix as sp_csr_matrix
from scipy.sparse import csc_matrix as sp_csc_matrix
from pylibcugraph.testing.utils import gen_fixture_params_product
from cugraph.testing import (
utils,
get_resultset,
load_resultset,
UNDIRECTED_DATASETS,
SMALL_DATASETS,
)
# Map of cuGraph input types to the expected output type for cugraph.sssp
# calls (used by cugraph_call below): Graph inputs return a cudf.DataFrame,
# all sparse-matrix inputs return a (distances, predecessors) tuple.
cuGraph_input_output_map = {
    cugraph.Graph: cudf.DataFrame,
    cp_coo_matrix: tuple,
    cp_csr_matrix: tuple,
    cp_csc_matrix: tuple,
    sp_coo_matrix: tuple,
    sp_csr_matrix: tuple,
    sp_csc_matrix: tuple,
}
# CuPy sparse types: these yield cupy.ndarray results, SciPy types yield
# numpy.ndarray results (checked in cugraph_call).
cupy_types = [cp_coo_matrix, cp_csr_matrix, cp_csc_matrix]
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run a full garbage-collection pass before every test in this module."""
    gc.collect()
# =============================================================================
# Helper functions
# =============================================================================
def cugraph_call(gpu_benchmark_callable, input_G_or_matrix, source, edgevals=True):
    """
    Call cugraph.sssp on input_G_or_matrix, then convert the result to a
    standard format (dictionary of vertex IDs to (distance, predecessor)
    tuples) for easy checking in the test code.

    Returns a (result_dict, max_val) tuple; max_val is the largest value
    representable by the distance dtype and is used by callers as the
    "unreachable vertex" sentinel.
    """
    result = gpu_benchmark_callable(cugraph.sssp, input_G_or_matrix, source)
    input_type = type(input_G_or_matrix)
    # The input type determines which result type sssp is expected to return.
    expected_return_type = cuGraph_input_output_map[type(input_G_or_matrix)]
    assert type(result) is expected_return_type
    # Convert cudf and pandas: DF of 3 columns: (vertex, distance, predecessor)
    if expected_return_type in [cudf.DataFrame, pd.DataFrame]:
        if expected_return_type is pd.DataFrame:
            result = cudf.from_pandas(result)
        # Pick the sentinel based on integer vs. floating-point distances.
        if np.issubdtype(result["distance"].dtype, np.integer):
            max_val = np.iinfo(result["distance"].dtype).max
        else:
            max_val = np.finfo(result["distance"].dtype).max
        verts = result["vertex"].to_numpy()
        dists = result["distance"].to_numpy()
        preds = result["predecessor"].to_numpy()
    # A CuPy/SciPy input means the return value will be a 2-tuple of:
    #   distance: cupy.ndarray
    #     ndarray of shortest distances between source and vertex.
    #   predecessor: cupy.ndarray
    #     ndarray of predecessors of a vertex on the path from source, which
    #     can be used to reconstruct the shortest paths.
    elif expected_return_type is tuple:
        # CuPy inputs produce cupy arrays, SciPy inputs produce numpy arrays.
        if input_type in cupy_types:
            assert type(result[0]) is cp.ndarray
            assert type(result[1]) is cp.ndarray
        else:
            assert type(result[0]) is np.ndarray
            assert type(result[1]) is np.ndarray
        if np.issubdtype(result[0].dtype, np.integer):
            max_val = np.iinfo(result[0].dtype).max
        else:
            max_val = np.finfo(result[0].dtype).max
        # Get unique verts from input since they are not included in output.
        # CSR/CSC matrices are converted to COO first to read row/col arrays.
        if type(input_G_or_matrix) in [
            cp_csr_matrix,
            cp_csc_matrix,
            sp_csr_matrix,
            sp_csc_matrix,
        ]:
            coo = input_G_or_matrix.tocoo(copy=False)
        else:
            coo = input_G_or_matrix
        verts = sorted(set([n.item() for n in coo.col] + [n.item() for n in coo.row]))
        dists = [n.item() for n in result[0]]
        preds = [n.item() for n in result[1]]
        assert len(verts) == len(dists) == len(preds)
    else:
        raise RuntimeError(f"unsupported return type: {expected_return_type}")
    # Map vertex id -> (distance, predecessor) for easy assertions in tests.
    result_dict = dict(zip(verts, zip(dists, preds)))
    return result_dict, max_val
def resultset_call(graph_file, source, load_results, edgevals=True):
    """
    Build test inputs for one (dataset, source) combination: fetch the golden
    shortest-path distances from the stored resultsets and construct the
    corresponding directed cugraph Graph.

    Returns (G, dataset_path, graph_file, source, golden_paths) where
    golden_paths maps vertex id -> golden distance.

    NOTE(review): load_results is accepted but never used — the fixture
    callers pass load_traversal_results through it; confirm intent.
    """
    dataset_path = graph_file.get_path()
    dataset_name = graph_file.metadata["name"]
    if edgevals is False:
        # FIXME: no test coverage if edgevals is False, this assertion is never reached
        assert False
        # Unreachable: retained for reference until the edgevals=False path
        # gains coverage.
        golden_paths = get_resultset(
            resultset_name="traversal",
            algo="single_source_shortest_path_length",
            graph_dataset=dataset_name,
            graph_directed=str(True),
            source=str(source),
        )
    else:
        # FIXME: The golden results (nx) below doesn't return accurate results as it
        # seems to not support 'weights'. It matches cuGraph result only if the weight
        # column is 1s.
        golden_paths = get_resultset(
            resultset_name="traversal",
            algo="single_source_dijkstra_path_length",
            graph_dataset=dataset_name,
            graph_directed=str(True),
            source=str(source),
        )
    # Convert the (vertex, distance) result table into a plain dict.
    golden_paths = cudf.Series(
        golden_paths.distance.values, index=golden_paths.vertex
    ).to_dict()
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=True), ignore_weights=not edgevals
    )
    return (G, dataset_path, graph_file, source, golden_paths)
# =============================================================================
# Pytest fixtures
# =============================================================================
# Call gen_fixture_params_product() to calculate the cartesian product of
# multiple lists of params. This is required since parameterized fixtures do
# not do this automatically (unlike multiply-parameterized tests). The 2nd
# item in the tuple is a label for the param value used when displaying the
# full test name.
# FIXME: tests with datasets like 'netscience' which has a weight column different
# than 1's fail because it looks like networkX doesn't consider weights during
# the computation.
# Parameter lists combined into cartesian products for the fixtures below.
DATASETS = [pytest.param(d) for d in SMALL_DATASETS]
SOURCES = [pytest.param(1)]
fixture_params = gen_fixture_params_product((DATASETS, "ds"), (SOURCES, "src"))
# Single-dataset variant keeps the expensive nonnative-input tests fast.
fixture_params_single_dataset = gen_fixture_params_product(
    ([DATASETS[0]], "ds"), (SOURCES, "src")
)
# Fixture that loads all golden results necessary to run cugraph tests if the
# tests are not already present in the designated results directory. Most of the
# time, this will only check if the module-specific mapping file exists.
@pytest.fixture(scope="module")
def load_traversal_results():
    """Ensure the golden traversal resultsets are present (downloads once)."""
    resultset_url = "https://data.rapids.ai/cugraph/results/resultsets.tar.gz"
    load_resultset("traversal", resultset_url)
@pytest.fixture(scope="module", params=fixture_params)
def dataset_source_goldenresults(request, load_traversal_results):
    """
    Golden results per (dataset, source). Fix: load_traversal_results is now
    declared as a fixture argument so pytest actually runs it (the original
    referenced the module-level fixture *function object*, which never
    triggered the resultset download).
    """
    # request.param is a tuple of params from fixture_params. When expanded
    # with *, will be passed to resultset_call() as args (graph_file, source)
    return resultset_call(*(request.param), load_traversal_results)
@pytest.fixture(scope="module", params=fixture_params_single_dataset)
def single_dataset_source_goldenresults(request, load_traversal_results):
    # Fix: declare load_traversal_results as a fixture argument so pytest
    # runs the download step before building the golden results.
    return resultset_call(*(request.param), load_traversal_results)
@pytest.fixture(scope="module", params=fixture_params)
def dataset_source_goldenresults_weighted(request, load_traversal_results):
    # Fix: declare load_traversal_results as a fixture argument so pytest
    # runs the download step before building the golden results.
    return resultset_call(*(request.param), load_traversal_results, edgevals=True)
@pytest.fixture(scope="module", params=fixture_params_single_dataset)
def single_dataset_source_goldenresults_weighted(request, load_traversal_results):
    # Fix: declare load_traversal_results as a fixture argument so pytest
    # runs the download step before building the golden results.
    return resultset_call(*(request.param), load_traversal_results, edgevals=True)
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.CUGRAPH_DIR_INPUT_TYPES)
def test_sssp(gpubenchmark, dataset_source_goldenresults, cugraph_input_type):
    """Compare cugraph.sssp distances against the golden results."""
    (G, dataset_path, _, source, golden_paths) = dataset_source_goldenresults
    if isinstance(cugraph_input_type, cugraph.Graph):
        input_obj = G
    else:
        input_obj = utils.create_obj_from_csv(
            dataset_path, cugraph_input_type, edgevals=True
        )
    cu_paths, max_val = cugraph_call(gpubenchmark, input_obj, source)
    # Count every disagreement with the golden distances.
    mismatches = 0
    for vid, (dist, pred) in cu_paths.items():
        # NOTE : If distance type is float64 then dist
        # should be compared against np.finfo(np.float64).max)
        if dist == max_val:
            # Unreachable in cugraph: must also be absent from golden paths.
            if vid in golden_paths:
                mismatches += 1
        else:
            if dist != golden_paths[vid]:
                mismatches += 1
            # Unweighted: a vertex's distance is its predecessor's plus one.
            if vid != source and cu_paths[pred][0] + 1 != dist:
                mismatches += 1
    assert mismatches == 0
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.CUGRAPH_DIR_INPUT_TYPES)
def test_sssp_invalid_start(
    gpubenchmark, dataset_source_goldenresults, cugraph_input_type
):
    """SSSP must reject a source vertex that does not exist in the graph."""
    G = dataset_source_goldenresults[0]
    edges = G.view_edge_list()
    # One past the largest vertex id is guaranteed to be absent.
    missing_vertex = max(edges.src.max(), edges.dst.max()) + 1
    with pytest.raises(ValueError):
        cugraph_call(gpubenchmark, G, missing_vertex)
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.MATRIX_INPUT_TYPES)
def test_sssp_nonnative_inputs_matrix(
    gpubenchmark, single_dataset_source_goldenresults, cugraph_input_type
):
    # Delegates to test_sssp, restricted to CuPy/SciPy matrix input types and
    # a single dataset to keep runtime down.
    test_sssp(gpubenchmark, single_dataset_source_goldenresults, cugraph_input_type)
@pytest.mark.sg
@pytest.mark.parametrize("directed", [True, False])
def test_sssp_nonnative_inputs_graph(single_dataset_source_goldenresults, directed):
    """Validate stored sssp_nonnative results against the golden distances."""
    (_, _, graph_file, source, golden_paths) = single_dataset_source_goldenresults
    result = get_resultset(
        resultset_name="traversal",
        algo="sssp_nonnative",
        graph_dataset=graph_file.metadata["name"],
        graph_directed=str(directed),
        source=str(source),
    )
    # Sentinel for "unreachable" depends on the distance dtype.
    dist_dtype = result["distance"].dtype
    if np.issubdtype(dist_dtype, np.integer):
        max_val = np.iinfo(dist_dtype).max
    else:
        max_val = np.finfo(dist_dtype).max
    cu_paths = dict(
        zip(
            result["vertex"].to_numpy(),
            zip(result["distance"].to_numpy(), result["predecessor"].to_numpy()),
        )
    )
    mismatches = 0
    for vid, (dist, pred) in cu_paths.items():
        if dist == max_val:
            # Unreachable: must be absent from the golden paths too.
            if vid in golden_paths:
                mismatches += 1
        else:
            if dist != golden_paths[vid]:
                mismatches += 1
            # Unweighted: distance equals predecessor's distance plus one.
            if vid != source and cu_paths[pred][0] + 1 != dist:
                mismatches += 1
    assert mismatches == 0
@pytest.mark.sg
@pytest.mark.parametrize("cugraph_input_type", utils.CUGRAPH_DIR_INPUT_TYPES)
def test_sssp_edgevals(
    gpubenchmark, dataset_source_goldenresults_weighted, cugraph_input_type
):
    """
    Compare weighted sssp distances against the golden results and verify
    each reachable vertex's distance is consistent with its predecessor's.
    """
    # Extract the params generated from the fixture
    (G, _, _, source, golden_paths) = dataset_source_goldenresults_weighted
    input_G_or_matrix = G
    cu_paths, max_val = cugraph_call(
        gpubenchmark, input_G_or_matrix, source, edgevals=True
    )
    # Calculating mismatch
    err = 0
    for vid in cu_paths:
        # Validate vertices that are reachable
        # NOTE : If distance type is float64 then cu_paths[vid][0]
        # should be compared against np.finfo(np.float64).max)
        if cu_paths[vid][0] != max_val:
            if cu_paths[vid][0] != golden_paths[vid]:
                err = err + 1
            # check pred dist + edge_weight = current dist
            if vid != source:
                pred = cu_paths[vid][1]
                if G.has_edge(pred, vid):
                    # Perf fix: the per-vertex SSSP was previously run for
                    # every vid up front; only run it when the weight is
                    # actually needed.
                    # NOTE(review): the "edge weight" below is the shortest
                    # distance from vid to pred, which equals the edge weight
                    # only when the direct edge is the shortest path — confirm.
                    distances = cugraph.sssp(G, source=vid)
                    edge_weight = distances[distances["vertex"] == pred].iloc[0, 0]
                    if cu_paths[pred][0] + edge_weight != cu_paths[vid][0]:
                        err = err + 1
        else:
            if vid in golden_paths.keys():
                err = err + 1
    assert err == 0
@pytest.mark.sg
@pytest.mark.parametrize(
    "cugraph_input_type", utils.NX_DIR_INPUT_TYPES + utils.MATRIX_INPUT_TYPES
)
def test_sssp_edgevals_nonnative_inputs(
    gpubenchmark, single_dataset_source_goldenresults_weighted, cugraph_input_type
):
    # Delegates to test_sssp_edgevals for NetworkX and matrix input types,
    # restricted to a single dataset to keep runtime down.
    test_sssp_edgevals(
        gpubenchmark, single_dataset_source_goldenresults_weighted, cugraph_input_type
    )
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DATASETS)
@pytest.mark.parametrize("source", SOURCES)
def test_sssp_data_type_conversion(graph_file, source):
    """
    sssp must accept int32 edge weights and still match the golden dijkstra
    distances (distances come back as floating point).
    """
    dataset_path = graph_file.get_path()
    dataset_name = graph_file.metadata["name"]
    cu_M = utils.read_csv_file(dataset_path)
    # cugraph call with int32 weights
    cu_M["2"] = cu_M["2"].astype(np.int32)
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="2")
    # assert cugraph weights is int32
    assert G.edgelist.edgelist_df["weights"].dtype == np.int32
    df = cugraph.sssp(G, source)
    max_val = np.finfo(df["distance"].dtype).max
    verts_np = df["vertex"].to_numpy()
    dist_np = df["distance"].to_numpy()
    pred_np = df["predecessor"].to_numpy()
    cu_paths = dict(zip(verts_np, zip(dist_np, pred_np)))
    golden_paths = get_resultset(
        resultset_name="traversal",
        algo="single_source_dijkstra_path_length",
        graph_dataset=dataset_name,
        graph_directed=str(True),
        source=str(source),
        test="data_type_conversion",
    )
    golden_paths = cudf.Series(
        golden_paths.distance.values, index=golden_paths.vertex
    ).to_dict()
    # Calculating mismatch
    err = 0
    for vid in cu_paths:
        # Validate vertices that are reachable
        if cu_paths[vid][0] != max_val:
            if cu_paths[vid][0] != golden_paths[vid]:
                err = err + 1
            # check pred dist + edge_weight = current dist
            if vid != source:
                pred = cu_paths[vid][1]
                if G.has_edge(pred, vid):
                    # Perf fix: run the per-vertex SSSP only when the edge
                    # weight is actually needed instead of for every vid.
                    distances = cugraph.sssp(G, source=vid)
                    edge_weight = distances[distances["vertex"] == pred].iloc[0, 0]
                    if cu_paths[pred][0] + edge_weight != cu_paths[vid][0]:
                        err = err + 1
        else:
            if vid in golden_paths.keys():
                err = err + 1
    assert err == 0
@pytest.mark.sg
def test_sssp_golden_edge_attr(load_traversal_results):
    """Spot-check stored sssp_nonnative distances for the edge-attr network."""
    result = get_resultset(
        resultset_name="traversal", algo="sssp_nonnative", test="network_edge_attr"
    ).set_index("vertex")
    expected_distances = {0: 0, 1: 10, 2: 30}
    for vertex, expected in expected_distances.items():
        assert result.loc[vertex, "distance"] == expected
@pytest.mark.sg
def test_scipy_api_compat():
    """shortest_path accepts SciPy-style kwargs only for matrix inputs."""
    graph_file = SMALL_DATASETS[0]
    dataset_path = graph_file.get_path()
    cg_graph = graph_file.get_graph()
    coo_matrix = utils.create_obj_from_csv(dataset_path, cp_coo_matrix, edgevals=True)
    # SciPy-only kwargs must be rejected when the input is a cugraph.Graph.
    for scipy_only_kwarg in ("directed", "unweighted", "overwrite", "return_predecessors"):
        with pytest.raises(TypeError):
            cugraph.shortest_path(cg_graph, source=0, **{scipy_only_kwarg: False})
    # Exactly one of source/indices must be provided.
    with pytest.raises(TypeError):
        cugraph.shortest_path(cg_graph, source=0, indices=0)
    with pytest.raises(TypeError):
        cugraph.shortest_path(cg_graph)
    with pytest.raises(ValueError):
        cugraph.shortest_path(cg_graph, source=0, method="BF")
    cugraph.shortest_path(cg_graph, indices=0)
    with pytest.raises(ValueError):
        cugraph.shortest_path(cg_graph, indices=[0, 1, 2])
    cugraph.shortest_path(cg_graph, source=0, method="auto")
    # Matrix inputs: same source/indices rules, SciPy kwargs now accepted.
    with pytest.raises(TypeError):
        cugraph.shortest_path(coo_matrix, source=0, indices=0)
    with pytest.raises(TypeError):
        cugraph.shortest_path(coo_matrix)
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, source=0, method="BF")
    cugraph.shortest_path(coo_matrix, source=0, method="auto")
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, source=0, directed=3)
    cugraph.shortest_path(coo_matrix, source=0, directed=True)
    cugraph.shortest_path(coo_matrix, source=0, directed=False)
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, source=0, return_predecessors=3)
    # return_predecessors toggles between a 2-tuple and a bare result.
    distances, _ = cugraph.shortest_path(
        coo_matrix, source=0, return_predecessors=True
    )
    distances = cugraph.shortest_path(
        coo_matrix, source=0, return_predecessors=False
    )
    assert type(distances) is not tuple
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, source=0, unweighted=False)
    cugraph.shortest_path(coo_matrix, source=0, unweighted=True)
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, source=0, overwrite=True)
    cugraph.shortest_path(coo_matrix, source=0, overwrite=False)
    with pytest.raises(ValueError):
        cugraph.shortest_path(coo_matrix, indices=[0, 1, 2])
    cugraph.shortest_path(coo_matrix, indices=0)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
def test_sssp_csr_graph(graph_file):
    """sssp must agree between CSR-adjlist and COO-edgelist built graphs."""
    edgelist = graph_file.get_edgelist()
    coo = cupyx.scipy.sparse.coo_matrix(
        (
            edgelist["wgt"].to_cupy(),
            (edgelist["src"].to_cupy(), edgelist["dst"].to_cupy()),
        )
    )
    csr = coo.tocsr()
    G_csr = cugraph.Graph()
    G_csr.from_cudf_adjlist(
        cudf.Series(csr.indptr), cudf.Series(csr.indices), cudf.Series(csr.data)
    )
    G_coo = graph_file.get_graph()
    source = G_coo.select_random_vertices(num_vertices=1)[0]
    print("source = ", source)
    csr_result = cugraph.sssp(G_csr, source).sort_values("vertex").reset_index(
        drop=True
    )
    # Combine both results into one frame for a column-wise comparison.
    combined = (
        cugraph.sssp(G_coo, source)
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(columns={"distance": "distance_coo", "predecessor": "predecessor_coo"})
    )
    combined["distance_csr"] = csr_result["distance"]
    combined["predecessor_csr"] = csr_result["predecessor"]
    assert len(combined.query("distance_csr != distance_coo")) == 0
    assert len(combined.query("predecessor_csr != predecessor_coo")) == 0
@pytest.mark.sg
def test_sssp_unweighted_graph():
    """sssp must raise RuntimeError when the graph has no edge weights."""
    unweighted_G = SMALL_DATASETS[0].get_graph(ignore_weights=True)
    expected_msg = (
        "'SSSP' requires the input graph to be weighted."
        "'BFS' should be used instead of 'SSSP' for unweighted graphs."
    )
    with pytest.raises(RuntimeError, match=expected_msg):
        cugraph.sssp(unweighted_G, 1)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/data_store/test_property_graph.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import gc
import pytest
import pandas as pd
import numpy as np
import cudf
import cupy as cp
import cugraph
from cugraph.generators import rmat
from cugraph.datasets import cyber
from cudf.testing import assert_frame_equal, assert_series_equal
from pylibcugraph.testing.utils import gen_fixture_params_product
# If the rapids-pytest-benchmark plugin is installed, the "gpubenchmark"
# fixture will be available automatically. Check that this fixture is available
# by trying to import rapids_pytest_benchmark, and if that fails, set
# "gpubenchmark" to the standard "benchmark" fixture provided by
# pytest-benchmark.
try:
import rapids_pytest_benchmark # noqa: F401
except ImportError:
import pytest_benchmark
gpubenchmark = pytest_benchmark.plugin.benchmark
# FIXME: remove when fully-migrated to pandas 1.5.0
try:
# pandas 1.5.0
from pandas.errors import SettingWithCopyWarning as pandas_SettingWithCopyWarning
except ImportError:
# pandas 1.4
from pandas.core.common import (
SettingWithCopyWarning as pandas_SettingWithCopyWarning,
)
def type_is_categorical(pG):
    """
    Return True when each populated property table in pG stores its type
    column with a categorical dtype (tables that are None are ignored).
    """
    def _categorical_or_absent(prop_df):
        # Access pG.type_col_name lazily so a None table never touches it.
        return prop_df is None or prop_df.dtypes[pG.type_col_name] == "category"

    return _categorical_or_absent(pG._vertex_prop_dataframe) and _categorical_or_absent(
        pG._edge_prop_dataframe
    )
# =============================================================================
# Test data
# =============================================================================
# Each entry maps a type name to a [column_names, row_tuples] pair.
# "merchants", "users" and "taxpayers" are vertex tables; "transactions",
# "relationships" and "referrals" are edge tables (first two columns are the
# edge endpoints).
dataset1 = {
    "merchants": [
        [
            "merchant_id",
            "merchant_location",
            "merchant_size",
            "merchant_sales",
            "merchant_num_employees",
            "merchant_name",
        ],
        [
            (11, 78750, 44, 123.2, 12, "north"),
            (4, 78757, 112, 234.99, 18, "south"),
            (21, 44145, 83, 992.1, 27, "east"),
            (16, 47906, 92, 32.43, 5, "west"),
            (86, 47906, 192, 2.43, 51, "west"),
        ],
    ],
    "users": [
        ["user_id", "user_location", "vertical"],
        [
            (89021, 78757, 0),
            (32431, 78750, 1),
            (89216, 78757, 1),
            (78634, 47906, 0),
        ],
    ],
    # taxpayers overlaps ids from both merchants and users (adds properties
    # to existing vertices rather than new vertices).
    "taxpayers": [
        ["payer_id", "amount"],
        [
            (11, 1123.98),
            (4, 3243.7),
            (21, 8932.3),
            (16, 3241.77),
            (86, 789.2),
            (89021, 23.98),
            (78634, 41.77),
        ],
    ],
    "transactions": [
        ["user_id", "merchant_id", "volume", "time", "card_num", "card_type"],
        [
            (89021, 11, 33.2, 1639084966.5513437, 123456, "MC"),
            (89216, 4, None, 1639085163.481217, 8832, "CASH"),
            (78634, 16, 72.0, 1639084912.567394, 4321, "DEBIT"),
            (32431, 4, 103.2, 1639084721.354346, 98124, "V"),
        ],
    ],
    "relationships": [
        ["user_id_1", "user_id_2", "relationship_type"],
        [
            (89216, 89021, 9),
            (89216, 32431, 9),
            (32431, 78634, 8),
            (78634, 89216, 8),
        ],
    ],
    "referrals": [
        ["user_id_1", "user_id_2", "merchant_id", "stars"],
        [
            (89216, 78634, 11, 5),
            (89021, 89216, 4, 4),
            (89021, 89216, 21, 3),
            (89021, 89216, 11, 3),
            (89021, 78634, 21, 4),
            (78634, 32431, 11, 4),
        ],
    ],
}
# Minimal single-edge-table dataset used by dataset2_simple_PropertyGraph.
dataset2 = {
    "simple": [
        ["src", "dst", "some_property"],
        [
            (99, 22, "a"),
            (98, 34, "b"),
            (97, 56, "c"),
            (96, 88, "d"),
        ],
    ],
}
# CSV file contents used for testing various CSV-based use cases.
# These are to be used for test_single_csv_multi_vertex_edge_attrs()
# (space-delimited; one table mixing edge and endpoint attributes)
edges_edgeprops_vertexprops_csv = """
src dst edge_attr1 edge_attr2 src_attr1 src_attr2 dst_attr1 dst_attr2
0 1 87 "a" 3.1 "v0" 1.3 "v1"
0 2 88 "b" 3.2 "v0" 1.1 "v2"
2 1 89 "c" 2.3 "v2" 1.9 "v1"
"""
# Vertex-only property table.
vertexprops_csv = """
vertex attr1 attr2
0 32 dog
1 54 fish
2 87 cat
3 12 snake
4 901 gecko
"""
# Edge table carrying an edge_id used to join with edgeid_edgeprops_csv.
edgeprops_csv = """
v_src v_dst edge_id
0 1 123
0 2 432
2 1 789
"""
edgeid_edgeprops_csv = """
edge_id attr1 attr2
123 'PUT' 21.32
432 'POST' 21.44
789 'GET' 22.03
"""
# Placeholder for a directed Graph instance. This is not constructed here in
# order to prevent cuGraph code from running on import, which would prevent
# proper pytest collection if an exception is raised. See setup_function().
DiGraph_inst = None
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Collect garbage and rebuild the module-level directed Graph instance."""
    global DiGraph_inst
    gc.collect()
    # Set the global DiGraph_inst, used by tests that must supply a Graph
    # type or instance for directed-graph calls.
    DiGraph_inst = cugraph.Graph(directed=True)
# =============================================================================
# Pytest fixtures
# =============================================================================
@pytest.fixture(scope="function", autouse=True)
def raise_on_pandas_warning():
    """Escalate pandas SettingWithCopyWarning to an error for every test."""
    # Perhaps we should put this in pytest.ini, pyproject.toml, or conftest.py
    import warnings

    # Idiom fix: warnings.catch_warnings() saves and restores the filter
    # state automatically instead of the previous manual list copy/restore.
    with warnings.catch_warnings():
        warnings.filterwarnings("error", category=pandas_SettingWithCopyWarning)
        yield
# DataFrame types over which the PropertyGraph fixtures/tests are parameterized.
df_types = [cudf.DataFrame, pd.DataFrame]
def df_type_id(dataframe_type):
    """
    Return a string that describes the dataframe_type, used for test output.
    """
    labels = {
        cudf.DataFrame: "cudf.DataFrame",
        pd.DataFrame: "pandas.DataFrame",
    }
    return "df_type=" + labels.get(dataframe_type, "?")
# Fixture param product so parameterized fixtures get ids from df_type_id.
df_types_fixture_params = gen_fixture_params_product((df_types, df_type_id))
@pytest.fixture(scope="function", params=df_types_fixture_params)
def dataset1_PropertyGraph(request):
    """
    Fixture which returns an instance of a PropertyGraph with vertex and edge
    data added from dataset1, parameterized for different DataFrame types.

    Returns a (PropertyGraph, dataset1) tuple.
    """
    dataframe_type = request.param[0]
    from cugraph.experimental import PropertyGraph

    (
        merchants,
        users,
        taxpayers,
        transactions,
        relationships,
        referrals,
    ) = dataset1.values()
    pG = PropertyGraph()
    # Vertex and edge data is added as one or more DataFrames; either a Pandas
    # DataFrame to keep data on the CPU, a cuDF DataFrame to keep data on GPU,
    # or a dask_cudf DataFrame to keep data on distributed GPUs.
    # For dataset1: vertices are merchants and users, edges are transactions,
    # relationships, and referrals.
    # property_columns=None (the default) means all columns except
    # vertex_col_name will be used as properties for the vertices/edges.
    pG.add_vertex_data(
        dataframe_type(columns=merchants[0], data=merchants[1]),
        type_name="merchants",
        vertex_col_name="merchant_id",
        property_columns=None,
    )
    pG.add_vertex_data(
        dataframe_type(columns=users[0], data=users[1]),
        type_name="users",
        vertex_col_name="user_id",
        property_columns=None,
    )
    # Do not add taxpayers since that may now be considered invalid input (it
    # adds the same vertices under different types, which leads to the same
    # vertex ID appearing in the internal vertex prop table.
    #
    # FIXME: determine if this should be allowed or not then either remove
    # "taxpayers" or uncomment it.
    """
    pG.add_vertex_data(dataframe_type(columns=taxpayers[0],
                       data=taxpayers[1]),
                       type_name="taxpayers",
                       vertex_col_name="payer_id",
                       property_columns=None)
    """
    pG.add_edge_data(
        dataframe_type(columns=transactions[0], data=transactions[1]),
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    pG.add_edge_data(
        dataframe_type(columns=relationships[0], data=relationships[1]),
        type_name="relationships",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    pG.add_edge_data(
        dataframe_type(columns=referrals[0], data=referrals[1]),
        type_name="referrals",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    assert type_is_categorical(pG)
    return (pG, dataset1)
@pytest.fixture(scope="module", params=df_types_fixture_params)
def dataset2_simple_PropertyGraph(request):
    """
    Fixture which returns an instance of a PropertyGraph with only edge
    data added from dataset2, parameterized for different DataFrame types.
    """
    dataframe_type = request.param[0]
    from cugraph.experimental import PropertyGraph

    # BUG FIX: the parametrized DataFrame type was previously overwritten
    # with a hard-coded cudf.DataFrame, so the pandas variant was never
    # actually exercised despite appearing in the test ids.
    simple = dataset2["simple"]
    pG = PropertyGraph()
    df = dataframe_type(columns=simple[0], data=simple[1])
    pG.add_edge_data(df, vertex_col_names=("src", "dst"))
    assert type_is_categorical(pG)
    return (pG, simple)
@pytest.fixture(scope="module", params=df_types_fixture_params)
def cyber_PropertyGraph(request):
    """
    Return a PropertyGraph loaded with edge data from the cyber.csv dataset,
    parameterized over the supported DataFrame types.
    """
    from cugraph.experimental import PropertyGraph

    requested_df_type = request.param[0]
    edge_df = cyber.get_edgelist()
    # The dataset loader returns cuDF; convert when pandas was requested.
    if requested_df_type is pd.DataFrame:
        edge_df = edge_df.to_pandas()
    graph = PropertyGraph()
    graph.add_edge_data(edge_df, ("srcip", "dstip"))
    assert type_is_categorical(graph)
    return graph
@pytest.fixture(scope="module", params=df_types_fixture_params)
def rmat_PropertyGraph():
    """
    Fixture which uses the RMAT generator to generate a cuDF DataFrame
    edgelist, then uses it to add vertex and edge data to a PropertyGraph
    instance, then returns the (PropertyGraph, DataFrame) instances in a tuple.

    NOTE(review): params=df_types_fixture_params is declared but the fixture
    accepts no `request` argument, so the DataFrame-type params are never
    used (the edgelist is always cuDF) — confirm whether the params should
    be dropped or wired up.
    """
    from cugraph.experimental import PropertyGraph

    source_col_name = "src"
    dest_col_name = "dst"
    weight_col_name = "weight"
    scale = 20
    edgefactor = 16
    seed = 42
    df = rmat(
        scale,
        (2**scale) * edgefactor,
        0.57,  # from Graph500
        0.19,  # from Graph500
        0.19,  # from Graph500
        seed,
        clip_and_flip=False,
        scramble_vertex_ids=True,
        create_using=None,  # None == return edgelist
        mg=False,
    )
    # Attach a reproducible random weight column (seeded generator).
    rng = np.random.default_rng(seed)
    df[weight_col_name] = rng.random(size=len(df))
    pG = PropertyGraph()
    pG.add_edge_data(df, (source_col_name, dest_col_name))
    assert type_is_categorical(pG)
    return (pG, df)
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
@pytest.mark.parametrize("set_index", [True, False])
def test_add_vertex_data(df_type, set_index):
    """
    add_vertex_data() on "merchants" table, all properties.
    """
    from cugraph.experimental import PropertyGraph

    columns, rows = dataset1["merchants"]
    merchants_df = df_type(columns=columns, data=rows)
    if set_index:
        merchants_df.set_index("merchant_id", inplace=True)
    pG = PropertyGraph()
    pG.add_vertex_data(
        merchants_df,
        type_name="merchants",
        vertex_col_name="merchant_id",
        property_columns=None,
    )
    assert pG.get_num_vertices() == 5
    assert pG.get_num_vertices("merchants") == 5
    assert pG.get_num_edges() == 0
    # Every column except the vertex id column becomes a property.
    expected_props = set(columns) - {"merchant_id"}
    assert sorted(pG.vertex_property_names) == sorted(expected_props)
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_num_vertices(df_type):
    """
    Ensures get_num_vertices is correct after various additions of data.
    """
    from cugraph.experimental import PropertyGraph

    merchants = dataset1["merchants"]
    merchants_df = df_type(columns=merchants[0], data=merchants[1])
    pG = PropertyGraph()
    # An empty graph reports zero vertices/edges, even for unknown type names.
    assert pG.get_num_vertices() == 0
    assert pG.get_num_vertices("unknown_type") == 0
    assert pG.get_num_edges("unknown_type") == 0
    pG.add_vertex_data(
        merchants_df,
        type_name="merchants",
        vertex_col_name="merchant_id",
        property_columns=None,
    )
    # Test caching - the second retrieval should always be faster
    # NOTE(review): wall-clock timing comparisons can be flaky on loaded
    # machines; consider loosening or removing this assertion.
    st = time.time()
    assert pG.get_num_vertices() == 5
    compute_time = time.time() - st
    assert pG.get_num_edges() == 0
    st = time.time()
    assert pG.get_num_vertices() == 5
    cache_retrieval_time = time.time() - st
    assert cache_retrieval_time < compute_time
    users = dataset1["users"]
    users_df = df_type(columns=users[0], data=users[1])
    pG.add_vertex_data(
        users_df, type_name="users", vertex_col_name="user_id", property_columns=None
    )
    # 5 merchants + 4 users = 9 total vertices.
    assert pG.get_num_vertices() == 9
    assert pG.get_num_vertices("merchants") == 5
    assert pG.get_num_vertices("users") == 4
    assert pG.get_num_edges() == 0
    # The taxpayers table does not add new unique vertices, it only adds
    # properties to vertices already present in the merchants and users
    # tables.
    taxpayers = dataset1["taxpayers"]
    taxpayers_df = df_type(columns=taxpayers[0], data=taxpayers[1])
    pG.add_vertex_data(
        taxpayers_df,
        type_name="taxpayers",
        vertex_col_name="payer_id",
        property_columns=None,
    )
    assert pG.get_num_vertices() == 9
    assert pG.get_num_vertices("merchants") == 5
    assert pG.get_num_vertices("users") == 4
    assert pG.get_num_vertices("unknown_type") == 0
    assert pG.get_num_edges() == 0
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_type_names(df_type):
    """edge_types/vertex_types must track typed and default-typed additions."""
    from cugraph.experimental import PropertyGraph

    pG = PropertyGraph()
    # A fresh graph knows no types at all.
    assert pG.edge_types == set()
    assert pG.vertex_types == set()

    untyped_edges = df_type(
        {
            "src": [99, 98, 97],
            "dst": [22, 34, 56],
            "some_property": ["a", "b", "c"],
        }
    )
    pG.add_edge_data(untyped_edges, vertex_col_names=("src", "dst"))
    # Untyped additions register under the default empty-string type.
    assert pG.edge_types == {""}
    assert pG.vertex_types == {""}

    typed_verts = df_type(
        {
            "vertex": [98, 97],
            "some_property": ["a", "b"],
        }
    )
    pG.add_vertex_data(typed_verts, type_name="vtype", vertex_col_name="vertex")
    assert pG.edge_types == {""}
    assert pG.vertex_types == {"", "vtype"}

    typed_edges = df_type(
        {
            "src": [199, 98, 197],
            "dst": [22, 134, 56],
            "some_property": ["a", "b", "c"],
        }
    )
    pG.add_edge_data(typed_edges, type_name="etype", vertex_col_names=("src", "dst"))
    assert pG.edge_types == {"", "etype"}
    assert pG.vertex_types == {"", "vtype"}
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_num_vertices_include_edge_data(df_type):
    """
    Ensures get_num_vertices is correct after various additions of data.
    """
    from cugraph.experimental import PropertyGraph

    (
        merchants,
        users,
        taxpayers,
        transactions,
        relationships,
        referrals,
    ) = dataset1.values()
    pG = PropertyGraph()
    assert pG.get_num_vertices(include_edge_data=False) == 0
    assert pG.get_num_vertices("", include_edge_data=False) == 0
    pG.add_edge_data(
        df_type(columns=transactions[0], data=transactions[1]),
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    # Edge-only additions: no vertex has property data yet, but the edges
    # reference 7 distinct endpoints (4 user ids + 3 merchant ids).
    assert pG.get_num_vertices(include_edge_data=False) == 0
    assert pG.get_num_vertices("", include_edge_data=False) == 0
    assert pG.get_num_vertices(include_edge_data=True) == 7
    assert pG.get_num_vertices("", include_edge_data=True) == 7
    pG.add_vertex_data(
        df_type(columns=merchants[0], data=merchants[1]),
        # type_name="merchants",  # Use default!
        vertex_col_name="merchant_id",
        property_columns=None,
    )
    # 5 merchants now carry vertex properties (under the default "" type);
    # the union with the 7 edge endpoints covers 9 distinct vertices.
    assert pG.get_num_vertices(include_edge_data=False) == 5
    assert pG.get_num_vertices("", include_edge_data=False) == 5
    assert pG.get_num_vertices(include_edge_data=True) == 9
    assert pG.get_num_vertices("", include_edge_data=True) == 9
    pG.add_vertex_data(
        df_type(columns=users[0], data=users[1]),
        type_name="users",
        vertex_col_name="user_id",
        property_columns=None,
    )
    assert pG.get_num_vertices(include_edge_data=False) == 9
    assert pG.get_num_vertices("", include_edge_data=False) == 5
    assert pG.get_num_vertices("users", include_edge_data=False) == 4
    # All vertices now have vertex data, so this should match
    assert pG.get_num_vertices(include_edge_data=True) == 9
    assert pG.get_num_vertices("", include_edge_data=True) == 5
    assert pG.get_num_vertices("users", include_edge_data=True) == 4
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_num_vertices_with_properties(df_type):
    """
    Checks that the num_vertices_with_properties attr is set to the number of
    vertices that have properties, as opposed to just num_vertices which also
    includes all verts in the graph edgelist.
    """
    from cugraph.experimental import PropertyGraph

    pG = PropertyGraph()
    edges = df_type(
        {
            "src": [99, 98, 97],
            "dst": [22, 34, 56],
            "some_property": ["a", "b", "c"],
        }
    )
    pG.add_edge_data(edges, vertex_col_names=("src", "dst"))

    # Six unique endpoints appear in the edgelist, but none of them has had
    # vertex properties added yet.
    assert pG.get_num_vertices() == 6
    assert pG.get_num_vertices(include_edge_data=False) == 0

    verts = df_type(
        {
            "vertex": [98, 97],
            "some_property": ["a", "b"],
        }
    )
    pG.add_vertex_data(verts, vertex_col_name="vertex")

    # Total vertex count is unchanged; only two vertices carry properties.
    assert pG.get_num_vertices() == 6
    assert pG.get_num_vertices(include_edge_data=False) == 2
@pytest.mark.sg
def test_edges_attr(dataset2_simple_PropertyGraph):
    """
    Ensure the edges attr returns the src, dst, edge_id columns properly.
    """
    (pG, data) = dataset2_simple_PropertyGraph

    # Build the expected src/dst pairs from the fixture rows, dropping the
    # trailing property value from each (src, dst, prop) row.
    expected_edges = cudf.DataFrame(
        columns=[pG.src_col_name, pG.dst_col_name],
        data=[(src, dst) for (src, dst, _) in data[1]],
    )
    actual_edges = pG.edges[[pG.src_col_name, pG.dst_col_name]]
    assert_frame_equal(
        expected_edges.sort_values(by=pG.src_col_name, ignore_index=True),
        actual_edges.sort_values(by=pG.src_col_name, ignore_index=True),
    )

    # Edge IDs must be present and unique — exactly one per input edge.
    num_input_edges = len(data[1])
    edge_ids = pG.edges[pG.edge_id_col_name]
    assert len(edge_ids) == num_input_edges
    assert edge_ids.nunique() == num_input_edges
@pytest.mark.sg
def test_get_vertex_data(dataset1_PropertyGraph):
    """
    Ensure PG.get_vertex_data() returns the correct data based on vertex IDs
    passed in.
    """
    (pG, data) = dataset1_PropertyGraph
    # Ensure the generated vertex IDs are unique
    all_vertex_data = pG.get_vertex_data()
    assert all_vertex_data[pG.vertex_col_name].nunique() == len(all_vertex_data)
    # Test getting a subset of data
    # Use the appropriate series type based on input
    # FIXME: do not use the debug _vertex_prop_dataframe to determine type
    if isinstance(pG._vertex_prop_dataframe, cudf.DataFrame):
        vert_ids = cudf.Series([11, 4, 21])
    else:
        vert_ids = pd.Series([11, 4, 21])
    some_vertex_data = pG.get_vertex_data(vert_ids)
    actual_vertex_ids = some_vertex_data[pG.vertex_col_name]
    # Normalize GPU-resident series to host arrays before sorting/comparing.
    if hasattr(actual_vertex_ids, "values_host"):
        actual_vertex_ids = actual_vertex_ids.values_host
    if hasattr(vert_ids, "values_host"):
        vert_ids = vert_ids.values_host
    assert sorted(actual_vertex_ids) == sorted(vert_ids)
    # The returned columns should be the union of the property columns from
    # the tables the requested IDs appear in ("merchants" and "users"), plus
    # the standard vertex/type columns, minus the consumed ID columns.
    expected_columns = set([pG.vertex_col_name, pG.type_col_name])
    for d in ["merchants", "users"]:
        for name in data[d][0]:
            expected_columns.add(name)
    expected_columns -= {"merchant_id", "user_id"}
    actual_columns = set(some_vertex_data.columns)
    assert actual_columns == expected_columns
    # Test with specific columns and types
    vert_type = "merchants"
    columns = ["merchant_location", "merchant_size"]
    some_vertex_data = pG.get_vertex_data(types=[vert_type], columns=columns)
    # Ensure the returned df is the right length and includes only the
    # vert/type + specified columns
    standard_vert_columns = [pG.vertex_col_name, pG.type_col_name]
    assert len(some_vertex_data) == len(data[vert_type][1])
    assert sorted(some_vertex_data.columns) == sorted(columns + standard_vert_columns)
    # Test with all params specified
    vert_ids = [11, 4, 21]
    vert_type = "merchants"
    columns = ["merchant_location", "merchant_size"]
    some_vertex_data = pG.get_vertex_data(
        vertex_ids=vert_ids, types=[vert_type], columns=columns
    )
    # Ensure the returned df is the right length and includes at least the
    # specified columns.
    assert len(some_vertex_data) == len(vert_ids)
    assert set(columns) - set(some_vertex_data.columns) == set()
    # Allow a single vertex type and single vertex id to be passed in
    df1 = pG.get_vertex_data(vertex_ids=[11], types=[vert_type])
    df2 = pG.get_vertex_data(vertex_ids=11, types=vert_type)
    assert len(df1) == 1
    assert df1.shape == df2.shape
    # assert_frame_equal(df1, df2, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_get_vertex_data_repeated(df_type):
    """
    get_vertex_data() must return one row per requested ID, preserving
    request order and duplicates.
    """
    from cugraph.experimental import PropertyGraph

    pG = PropertyGraph()
    pG.add_vertex_data(df_type({"vertex": [2, 3, 4, 1], "feat": np.arange(4)}), "vertex")

    # Vertex 1 is requested twice; its row must appear twice, in order.
    result = pG.get_vertex_data(vertex_ids=[2, 1, 3, 1], columns=["feat"])
    result[pG.type_col_name] = result[pG.type_col_name].astype(str)  # Undo category

    expected = df_type(
        {
            pG.vertex_col_name: [2, 1, 3, 1],
            pG.type_col_name: [""] * 4,
            "feat": [0, 3, 1, 3],
        }
    )
    # Values read back through the PropertyGraph come out as nullable Int64.
    expected["feat"] = expected["feat"].astype("Int64")

    compare = (
        assert_frame_equal
        if df_type is cudf.DataFrame
        else pd.testing.assert_frame_equal
    )
    compare(result, expected)
@pytest.mark.sg
def test_get_edge_data(dataset1_PropertyGraph):
    """
    Ensure PG.get_edge_data() returns the correct data based on edge IDs passed
    in.
    """
    (pG, data) = dataset1_PropertyGraph
    # Ensure the generated edge IDs are unique
    all_edge_data = pG.get_edge_data()
    assert all_edge_data[pG.edge_id_col_name].nunique() == len(all_edge_data)
    # Test with specific edge IDs
    edge_ids = [4, 5, 6]
    some_edge_data = pG.get_edge_data(edge_ids)
    actual_edge_ids = some_edge_data[pG.edge_id_col_name]
    # Normalize GPU-resident series to a host array before sorting/comparing.
    if hasattr(actual_edge_ids, "values_host"):
        actual_edge_ids = actual_edge_ids.values_host
    assert sorted(actual_edge_ids) == sorted(edge_ids)
    # Create a list of expected column names from the three input tables
    expected_columns = set(
        [pG.src_col_name, pG.dst_col_name, pG.edge_id_col_name, pG.type_col_name]
    )
    for d in ["transactions", "relationships", "referrals"]:
        for name in data[d][0]:
            expected_columns.add(name)
    # The original vertex-ID columns are consumed as src/dst endpoints, so
    # they are not reported as edge properties.
    expected_columns -= {"user_id", "user_id_1", "user_id_2"}
    actual_columns = set(some_edge_data.columns)
    assert actual_columns == expected_columns
    # Test with specific columns and types
    edge_type = "transactions"
    columns = ["card_num", "card_type"]
    some_edge_data = pG.get_edge_data(types=[edge_type], columns=columns)
    # Ensure the returned df is the right length and includes only the
    # src/dst/id/type + specified columns
    standard_edge_columns = [
        pG.src_col_name,
        pG.dst_col_name,
        pG.edge_id_col_name,
        pG.type_col_name,
    ]
    assert len(some_edge_data) == len(data[edge_type][1])
    assert sorted(some_edge_data.columns) == sorted(columns + standard_edge_columns)
    # Test with all params specified
    # FIXME: since edge IDs are generated, assume that these are correct based
    # on the intended edges being the first three added.
    edge_ids = [0, 1, 2]
    edge_type = "transactions"
    columns = ["card_num", "card_type"]
    some_edge_data = pG.get_edge_data(
        edge_ids=edge_ids, types=[edge_type], columns=columns
    )
    # Ensure the returned df is the right length and includes at least the
    # specified columns.
    assert len(some_edge_data) == len(edge_ids)
    assert set(columns) - set(some_edge_data.columns) == set()
    # Allow a single edge type and single edge id to be passed in
    df1 = pG.get_edge_data(edge_ids=[1], types=[edge_type])
    df2 = pG.get_edge_data(edge_ids=1, types=edge_type)
    assert len(df1) == 1
    assert df1.shape == df2.shape
    # assert_frame_equal(df1, df2, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_get_edge_data_repeated(df_type):
    """
    get_edge_data() must return one row per requested edge ID, preserving
    request order and duplicates.
    """
    from cugraph.experimental import PropertyGraph

    pG = PropertyGraph()
    pG.add_edge_data(
        df_type({"src": [1, 1, 1, 2], "dst": [2, 3, 4, 1], "edge_feat": np.arange(4)}),
        vertex_col_names=["src", "dst"],
    )

    # Edge ID 1 is requested twice; its row must appear twice, in order.
    result = pG.get_edge_data(edge_ids=[2, 1, 3, 1], columns=["edge_feat"])
    result[pG.type_col_name] = result[pG.type_col_name].astype(str)  # Undo category

    expected = df_type(
        {
            pG.edge_id_col_name: [2, 1, 3, 1],
            pG.src_col_name: [1, 1, 2, 1],
            pG.dst_col_name: [4, 3, 1, 3],
            pG.type_col_name: [""] * 4,
            "edge_feat": [2, 1, 3, 1],
        }
    )
    # Values read back through the PropertyGraph come out as nullable Int64.
    for col in ("edge_feat", pG.src_col_name, pG.dst_col_name):
        expected[col] = expected[col].astype("Int64")

    compare = (
        assert_frame_equal
        if df_type is cudf.DataFrame
        else pd.testing.assert_frame_equal
    )
    compare(result, expected)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_null_data(df_type):
    """
    test for null data
    """
    from cugraph.experimental import PropertyGraph

    # NOTE(review): df_type is unused here — the parametrization repeats the
    # same empty-graph checks for each dataframe type.
    pG = PropertyGraph()

    # A freshly constructed PropertyGraph reports no vertices, no edges, and
    # no vertex properties.
    assert pG.get_num_vertices() == 0
    assert pG.get_num_edges() == 0
    assert sorted(pG.vertex_property_names) == []
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_add_vertex_data_prop_columns(df_type):
    """
    add_vertex_data() on "merchants" table, subset of properties.
    """
    from cugraph.experimental import PropertyGraph

    merchants = dataset1["merchants"]
    merchants_df = df_type(columns=merchants[0], data=merchants[1])
    # Only these three properties should be retained; all others are dropped.
    kept_props = ["merchant_name", "merchant_sales", "merchant_location"]

    pG = PropertyGraph()
    pG.add_vertex_data(
        merchants_df,
        type_name="merchants",
        vertex_col_name="merchant_id",
        property_columns=kept_props,
    )

    # All 5 merchants are present, no edges exist, and only the requested
    # property columns were stored.
    assert pG.get_num_vertices() == 5
    assert pG.get_num_vertices("merchants") == 5
    assert pG.get_num_edges() == 0
    assert sorted(pG.vertex_property_names) == sorted(kept_props)
    assert type_is_categorical(pG)
@pytest.mark.sg
def test_add_vertex_data_bad_args():
    """
    add_vertex_data() with various bad args, checks that proper exceptions are
    raised.
    """
    from cugraph.experimental import PropertyGraph

    merchants = dataset1["merchants"]
    merchants_df = cudf.DataFrame(columns=merchants[0], data=merchants[1])
    pG = PropertyGraph()

    # Each case: (expected exception, df, type_name, vertex_col_name,
    # property_columns).
    bad_calls = [
        # data is not a DataFrame
        (TypeError, 42, "merchants", "merchant_id", None),
        # type_name is not a string
        (TypeError, merchants_df, 42, "merchant_id", None),
        # vertex_col_name does not exist in the DataFrame
        (ValueError, merchants_df, "merchants", "bad_column_name", None),
        # property_columns names a missing column
        (
            ValueError,
            merchants_df,
            "merchants",
            "merchant_id",
            ["bad_column_name", "merchant_name"],
        ),
        # property_columns must be a list, not a string
        (TypeError, merchants_df, "merchants", "merchant_id", "merchant_name"),
    ]
    for (expected_exc, df, type_name, vert_col, prop_cols) in bad_calls:
        with pytest.raises(expected_exc):
            pG.add_vertex_data(
                df,
                type_name=type_name,
                vertex_col_name=vert_col,
                property_columns=prop_cols,
            )
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_add_edge_data(df_type):
    """
    add_edge_data() on "transactions" table, all properties.
    """
    from cugraph.experimental import PropertyGraph

    transactions = dataset1["transactions"]
    txn_df = df_type(columns=transactions[0], data=transactions[1])

    pG = PropertyGraph()
    pG.add_edge_data(
        txn_df,
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )

    # The 4 transactions reference 7 unique vertices in total.
    assert pG.get_num_vertices() == 7
    # 'transactions' is an edge type, not a vertex type.
    assert pG.get_num_vertices("transactions") == 0
    assert pG.get_num_edges() == 4
    assert pG.get_num_edges("transactions") == 4
    # The "user_id"/"merchant_id" columns become the edge endpoints; all
    # remaining columns are stored as edge properties.
    assert sorted(pG.edge_property_names) == sorted(
        ["volume", "time", "card_num", "card_type"]
    )
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_add_edge_data_prop_columns(df_type):
    """
    add_edge_data() on "transactions" table, subset of properties.
    """
    from cugraph.experimental import PropertyGraph

    transactions = dataset1["transactions"]
    txn_df = df_type(columns=transactions[0], data=transactions[1])
    # Only these two columns should be retained as edge properties.
    kept_props = ["card_num", "card_type"]

    pG = PropertyGraph()
    pG.add_edge_data(
        txn_df,
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=kept_props,
    )

    # The 4 transactions reference 7 unique vertices in total.
    assert pG.get_num_vertices() == 7
    # 'transactions' is an edge type, not a vertex type.
    assert pG.get_num_vertices("transactions") == 0
    assert pG.get_num_edges() == 4
    assert pG.get_num_edges("transactions") == 4
    assert sorted(pG.edge_property_names) == sorted(kept_props)
    assert type_is_categorical(pG)
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
@pytest.mark.parametrize("set_index", [True, False])
def test_add_edge_data_with_ids(df_type, set_index):
    """
    add_edge_data() on "transactions" table, all properties.
    """
    from cugraph.experimental import PropertyGraph
    transactions = dataset1["transactions"]
    transactions_df = df_type(columns=transactions[0], data=transactions[1])
    # User-provided edge IDs (10..), supplied either as a column or, when
    # set_index is True, as the DataFrame index.
    transactions_df["edge_id"] = list(range(10, 10 + len(transactions_df)))
    transactions_ids = transactions_df["edge_id"]
    if set_index:
        transactions_df.set_index("edge_id", inplace=True)
    pG = PropertyGraph()
    pG.add_edge_data(
        transactions_df,
        type_name="transactions",
        edge_id_col_name="edge_id",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    assert pG.get_num_vertices() == 7
    # 'transactions' is edge type, not vertex type
    assert pG.get_num_vertices("transactions") == 0
    assert pG.get_num_edges() == 4
    assert pG.get_num_edges("transactions") == 4
    # Original SRC and DST columns no longer include "merchant_id", "user_id"
    expected_props = ["volume", "time", "card_num", "card_type"]
    assert sorted(pG.edge_property_names) == sorted(expected_props)
    relationships = dataset1["relationships"]
    relationships_df = df_type(columns=relationships[0], data=relationships[1])
    # user-provided, then auto-gen (not allowed)
    with pytest.raises(NotImplementedError):
        pG.add_edge_data(
            relationships_df,
            type_name="relationships",
            vertex_col_names=("user_id_1", "user_id_2"),
            property_columns=None,
        )
    relationships_df["edge_id"] = list(range(30, 30 + len(relationships_df)))
    relationships_ids = relationships_df["edge_id"]
    if set_index:
        relationships_df.set_index("edge_id", inplace=True)
    pG.add_edge_data(
        relationships_df,
        type_name="relationships",
        edge_id_col_name="edge_id",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    # Choose the series comparator that matches the dataframe type under test.
    if df_type is cudf.DataFrame:
        ase = assert_series_equal
    else:
        ase = pd.testing.assert_series_equal
    # The user-provided edge IDs must round-trip exactly for each edge type.
    df = pG.get_edge_data(types="transactions")
    ase(
        df[pG.edge_id_col_name].sort_values().reset_index(drop=True),
        transactions_ids,
        check_names=False,
    )
    df = pG.get_edge_data(types="relationships")
    ase(
        df[pG.edge_id_col_name].sort_values().reset_index(drop=True),
        relationships_ids,
        check_names=False,
    )
    # auto-gen, then user-provided (not allowed)
    pG = PropertyGraph()
    pG.add_edge_data(
        transactions_df,
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    with pytest.raises(NotImplementedError):
        pG.add_edge_data(
            relationships_df,
            type_name="relationships",
            edge_id_col_name="edge_id",
            vertex_col_names=("user_id_1", "user_id_2"),
            property_columns=None,
        )
@pytest.mark.sg
def test_add_edge_data_bad_args():
    """
    add_edge_data() with various bad args, checks that proper exceptions are
    raised.
    """
    from cugraph.experimental import PropertyGraph

    transactions = dataset1["transactions"]
    transactions_df = cudf.DataFrame(columns=transactions[0], data=transactions[1])
    pG = PropertyGraph()

    # Valid keyword arguments; each bad call below overrides one of them.
    good_kwargs = dict(
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    # Each case: (expected exception, df positional arg, kwarg overrides).
    bad_calls = [
        # data is not a DataFrame
        (TypeError, 42, {}),
        # type_name is not a string
        (TypeError, transactions_df, {"type_name": 42}),
        # vertex_col_names references a missing column
        (ValueError, transactions_df, {"vertex_col_names": ("user_id", "bad_column")}),
        # property_columns names a missing column
        (
            ValueError,
            transactions_df,
            {"property_columns": ["bad_column_name", "time"]},
        ),
        # property_columns must be a list, not a string
        (TypeError, transactions_df, {"property_columns": "time"}),
        # edge_id_col_name must be a string
        (TypeError, transactions_df, {"edge_id_col_name": 42}),
        # edge_id_col_name must reference an existing column
        (ValueError, transactions_df, {"edge_id_col_name": "MISSING"}),
    ]
    for (expected_exc, df, overrides) in bad_calls:
        with pytest.raises(expected_exc):
            pG.add_edge_data(df, **{**good_kwargs, **overrides})
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_vertex_prop_condition_only(
    dataset1_PropertyGraph, as_pg_first
):
    """
    Extract a subgraph using only a vertex-property selection, either directly
    to a Graph or via an intermediate PropertyGraph (as_pg_first=True).
    """
    (pG, data) = dataset1_PropertyGraph
    # This should result in two users: 78634 and 89216
    selection = pG.select_vertices(
        f"({pG.type_col_name}=='users') "
        "& ((user_location<78750) | ((user_location==78757) & (vertical==1)))"
    )
    if as_pg_first:
        G = pG.extract_subgraph(selection=selection, create_using=pG).extract_subgraph(
            create_using=DiGraph_inst,
            edge_weight_property="relationship_type",
            default_edge_weight=99,
        )
    else:
        G = pG.extract_subgraph(
            selection=selection,
            create_using=DiGraph_inst,
            edge_weight_property="relationship_type",
            default_edge_weight=99,
        )
    # Should result in two edges, one a "relationship", the other a "referral"
    expected_edgelist = cudf.DataFrame(
        {"src": [89216, 78634], "dst": [78634, 89216], "weights": [99, 8]}
    )
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing against the expected edgelist.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    assert G.is_directed()
    # check_like=True ignores differences in column/index ordering
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_vertex_edge_prop_condition(
    dataset1_PropertyGraph, as_pg_first
):
    """
    Extract a subgraph using a combined vertex-property and edge-property
    selection, optionally via an intermediate PropertyGraph (as_pg_first).
    """
    from cugraph.experimental import PropertyGraph
    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name
    # Users at either of two locations, restricted to "referrals" edges.
    selection = pG.select_vertices("(user_location==47906) | " "(user_location==78750)")
    selection += pG.select_edges(f"{tcn}=='referrals'")
    if as_pg_first:
        G = pG.extract_subgraph(
            selection=selection,
            create_using=PropertyGraph,
        ).extract_subgraph(create_using=DiGraph_inst, edge_weight_property="stars")
    else:
        G = pG.extract_subgraph(
            selection=selection, create_using=DiGraph_inst, edge_weight_property="stars"
        )
    # Exactly one referral edge connects the two selected users.
    expected_edgelist = cudf.DataFrame({"src": [78634], "dst": [32431], "weights": [4]})
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_edge_prop_condition_only(dataset1_PropertyGraph, as_pg_first):
    """
    Extract a subgraph using only an edge-property (type) selection.
    """
    from cugraph.experimental import PropertyGraph
    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name
    selection = pG.select_edges(f"{tcn} =='transactions'")
    if as_pg_first:
        G = pG.extract_subgraph(selection=selection, create_using=pG).extract_subgraph(
            create_using=DiGraph_inst
        )
    else:
        G = pG.extract_subgraph(selection=selection, create_using=DiGraph_inst)
    # last item is the DataFrame rows
    transactions = dataset1["transactions"][-1]
    # The expected edges are the (src, dst) pairs of every transactions row.
    (srcs, dsts) = zip(*[(t[0], t[1]) for t in transactions])
    expected_edgelist = cudf.DataFrame({"src": srcs, "dst": dsts})
    expected_edgelist = expected_edgelist.sort_values(by="src", ignore_index=True)
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    actual_edgelist = actual_edgelist.sort_values(by="src", ignore_index=True)
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_unweighted(dataset1_PropertyGraph, as_pg_first):
    """
    Ensure a subgraph is unweighted if the edge_weight_property is None.
    """
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    selection = pG.select_edges(
        f"{PropertyGraph.type_col_name} == 'transactions'"
    )

    if as_pg_first:
        # Extract to an intermediate PropertyGraph first, then to a Graph.
        intermediate = pG.extract_subgraph(selection=selection, create_using=pG)
        G = intermediate.extract_subgraph(create_using=DiGraph_inst)
    else:
        G = pG.extract_subgraph(selection=selection, create_using=DiGraph_inst)

    # No edge_weight_property was given, so the result must be unweighted.
    assert G.is_weighted() is False
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_specific_query(dataset1_PropertyGraph, as_pg_first):
    """
    Graph of only transactions after time 1639085000 for merchant_id 4 (should
    be a graph of 2 vertices, 1 edge)
    """
    from cugraph.experimental import PropertyGraph
    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name
    # _DST_ below used to be referred to as merchant_id
    selection = pG.select_edges(
        f"({tcn}=='transactions') & " "(_DST_==4) & " "(time>1639085000)"
    )
    if as_pg_first:
        G = pG.extract_subgraph(selection=selection, create_using=pG).extract_subgraph(
            create_using=DiGraph_inst, edge_weight_property="card_num"
        )
    else:
        G = pG.extract_subgraph(
            selection=selection,
            create_using=DiGraph_inst,
            edge_weight_property="card_num",
        )
    # Exactly one transaction matches: user 89216 -> merchant 4, card 8832.
    expected_edgelist = cudf.DataFrame({"src": [89216], "dst": [4], "weights": [8832]})
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_select_vertices_from_previous_selection(dataset1_PropertyGraph, as_pg_first):
    """
    Ensures that the intersection of vertices of multiple types (only vertices
    that are both type A and type B) can be selected.
    """
    from cugraph.experimental import PropertyGraph
    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name
    # Select referrals from only users 89216 and 78634 using an intentionally
    # awkward query with separate select calls to test from_previous_selection
    selection = pG.select_vertices(f"{tcn} == 'users'")
    selection = pG.select_vertices(
        "((user_location == 78757) & (vertical == 1)) " "| (user_location == 47906)",
        from_previous_selection=selection,
    )
    selection += pG.select_edges(f"{tcn} == 'referrals'")
    if as_pg_first:
        G = pG.extract_subgraph(selection=selection, create_using=pG).extract_subgraph(
            create_using=DiGraph_inst
        )
    else:
        G = pG.extract_subgraph(create_using=DiGraph_inst, selection=selection)
    # Only the single referral edge between the two selected users survives.
    expected_edgelist = cudf.DataFrame({"src": [89216], "dst": [78634]})
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_graph_without_vert_props(as_pg_first):
    """
    Ensure a subgraph can be extracted from a PropertyGraph that does not have
    vertex properties.
    """
    from cugraph.experimental import PropertyGraph
    transactions = dataset1["transactions"]
    relationships = dataset1["relationships"]
    # Build a PropertyGraph containing only edge data (no vertex properties).
    pG = PropertyGraph()
    pG.add_edge_data(
        cudf.DataFrame(columns=transactions[0], data=transactions[1]),
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    pG.add_edge_data(
        cudf.DataFrame(columns=relationships[0], data=relationships[1]),
        type_name="relationships",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    scn = PropertyGraph.src_col_name
    # Select all outgoing edges of vertex 89216, weighting by
    # relationship_type; transactions lack that property, so they get the
    # default weight of 0.
    if as_pg_first:
        G = pG.extract_subgraph(
            selection=pG.select_edges(f"{scn} == 89216"),
            create_using=pG,
        ).extract_subgraph(
            create_using=DiGraph_inst,
            edge_weight_property="relationship_type",
            default_edge_weight=0,
        )
    else:
        G = pG.extract_subgraph(
            selection=pG.select_edges(f"{scn} == 89216"),
            create_using=DiGraph_inst,
            edge_weight_property="relationship_type",
            default_edge_weight=0,
        )
    expected_edgelist = cudf.DataFrame(
        {"src": [89216, 89216, 89216], "dst": [4, 89021, 32431], "weights": [0, 9, 9]}
    )
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_no_edges(dataset1_PropertyGraph, as_pg_first):
    """
    Valid query that only matches a single vertex.
    """
    (pG, data) = dataset1_PropertyGraph

    # "merchant_id" column is no longer saved; use as "_VERTEX_"
    with pytest.raises(NameError, match="merchant_id"):
        pG.select_vertices("(_TYPE_=='merchants') & (merchant_id==86)")
    selection = pG.select_vertices("(_TYPE_=='merchants') & (_VERTEX_==86)")

    if as_pg_first:
        G = pG.extract_subgraph(selection=selection, create_using=pG).extract_subgraph()
    else:
        G = pG.extract_subgraph(selection=selection)

    # A single matched vertex yields a directed graph containing no edges.
    assert G.is_directed()
    assert len(G.edgelist.edgelist_df) == 0
@pytest.mark.sg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_no_query(dataset1_PropertyGraph, as_pg_first):
    """
    Call extract with no args, should result in the entire property graph.
    """
    (pG, data) = dataset1_PropertyGraph

    if as_pg_first:
        G = pG.extract_subgraph(create_using=pG).extract_subgraph(
            create_using=DiGraph_inst, check_multi_edges=False
        )
    else:
        G = pG.extract_subgraph(create_using=DiGraph_inst, check_multi_edges=False)

    # Every edge table in the dataset contributes its rows.
    num_edges = sum(
        len(dataset1[table][-1])
        for table in ("transactions", "relationships", "referrals")
    )
    # referrals has 3 edges with the same src/dst, so subtract 2 from
    # the total count since this is not creating a multigraph..
    num_edges -= 2
    assert len(G.edgelist.edgelist_df) == num_edges
@pytest.mark.sg
def test_extract_subgraph_multi_edges(dataset1_PropertyGraph):
    """
    Ensure an exception is thrown if a graph is attempted to be extracted with
    multi edges.
    NOTE: an option to allow multi edges when create_using is
    MultiGraph will be provided in the future.
    """
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph

    # The "referrals" table contains duplicate src/dst pairs (multi edges).
    selection = pG.select_edges(f"{PropertyGraph.type_col_name} == 'referrals'")

    # Extracting into a non-multi Graph with checking enabled must fail.
    # FIXME: use a better exception
    with pytest.raises(RuntimeError):
        pG.extract_subgraph(
            selection=selection, create_using=DiGraph_inst, check_multi_edges=True
        )
    # Okay to return PropertyGraph, b/c check_multi_edges is ignored
    pG.extract_subgraph(selection=selection, create_using=pG, check_multi_edges=True)
@pytest.mark.sg
def test_extract_subgraph_bad_args(dataset1_PropertyGraph):
    """
    extract_subgraph() with various bad args, checks that proper exceptions
    are raised.
    """
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name

    # The selection arg must be a PropertySelection, not an arbitrary value.
    with pytest.raises(TypeError):
        pG.extract_subgraph(
            selection=78750,
            create_using=DiGraph_inst,
            edge_weight_property="stars",
            default_edge_weight=1.0,
        )

    selection = pG.select_edges(f"{tcn}=='referrals'")
    # Each case: (expected exception, kwargs for extract_subgraph).
    bad_calls = [
        # bad create_using type
        (
            TypeError,
            dict(
                selection=selection,
                create_using=pytest,
                edge_weight_property="stars",
                default_edge_weight=1.0,
            ),
        ),
        # invalid weight column name
        (
            ValueError,
            dict(
                selection=selection,
                edge_weight_property="bad_column",
                default_edge_weight=1.0,
            ),
        ),
        # weight column is all-None for the selection and no
        # default_edge_weight was given
        (ValueError, dict(selection=selection, edge_weight_property="card_type")),
    ]
    for (expected_exc, kwargs) in bad_calls:
        with pytest.raises(expected_exc):
            pG.extract_subgraph(**kwargs)
@pytest.mark.sg
def test_extract_subgraph_default_edge_weight(dataset1_PropertyGraph):
    """
    Ensure the default_edge_weight value is added to edges with missing
    properties used for weights.
    """
    from cugraph.experimental import PropertyGraph
    (pG, data) = dataset1_PropertyGraph
    tcn = PropertyGraph.type_col_name
    selection = pG.select_edges(f"{tcn}=='transactions'")
    G = pG.extract_subgraph(
        create_using=DiGraph_inst,
        selection=selection,
        edge_weight_property="volume",
        default_edge_weight=99,
    )
    # last item is the DataFrame rows
    transactions = dataset1["transactions"][-1]
    (srcs, dsts, weights) = zip(*[(t[0], t[1], t[2]) for t in transactions])
    # replace None with the expected value (convert to a list to replace)
    weights_list = list(weights)
    weights_list[weights.index(None)] = 99.0
    weights = tuple(weights_list)
    expected_edgelist = cudf.DataFrame({"src": srcs, "dst": dsts, "weights": weights})
    expected_edgelist = expected_edgelist.sort_values(by="src", ignore_index=True)
    # Map renumbered internal vertex IDs back to the original IDs before
    # comparing.
    if G.renumbered:
        actual_edgelist = G.unrenumber(
            G.edgelist.edgelist_df, "src", preserve_order=True
        )
        actual_edgelist = G.unrenumber(actual_edgelist, "dst", preserve_order=True)
    else:
        actual_edgelist = G.edgelist.edgelist_df
    actual_edgelist = actual_edgelist.sort_values(by="src", ignore_index=True)
    assert G.is_directed()
    assert_frame_equal(expected_edgelist, actual_edgelist, check_like=True)
@pytest.mark.sg
def test_extract_subgraph_default_edge_weight_no_property(dataset1_PropertyGraph):
    """
    Ensure default_edge_weight can be used to provide an edge value when a
    property for the edge weight is not specified.
    """
    (pG, data) = dataset1_PropertyGraph

    default_weight = 99.2
    G = pG.extract_subgraph(default_edge_weight=default_weight)

    # With no weight property named, every edge must carry the default.
    assert (G.edgelist.edgelist_df["weights"] == default_weight).all()
@pytest.mark.sg
def test_extract_subgraph_nonrenumbered_noedgedata():
    """
    Ensure a subgraph can be extracted that is not renumbered and contains no
    edge_data.
    """
    from cugraph.experimental import PropertyGraph
    from cugraph import Graph

    pG = PropertyGraph()
    pG.add_edge_data(
        cudf.DataFrame(
            {
                "src": [99, 98, 97],
                "dst": [22, 34, 56],
                "some_property": ["a", "b", "c"],
            }
        ),
        vertex_col_names=("src", "dst"),
    )

    G = pG.extract_subgraph(
        create_using=Graph(directed=True), renumber_graph=False, add_edge_data=False
    )

    # With renumber_graph=False the original vertex IDs appear unchanged in
    # the extracted edgelist.
    expected = cudf.DataFrame({"src": [99, 98, 97], "dst": [22, 34, 56]})
    assert_frame_equal(
        expected.sort_values(by="src", ignore_index=True),
        G.edgelist.edgelist_df.sort_values(by="src", ignore_index=True),
    )
    # add_edge_data=False means no edge_data attribute is attached to G.
    assert hasattr(G, "edge_data") is False
@pytest.mark.sg
def test_graph_edge_data_added(dataset1_PropertyGraph):
    """
    Ensures the subgraph returned from extract_subgraph() has the edge_data
    attribute added which contains the proper edge IDs.
    """
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    edge_id_col = PropertyGraph.edge_id_col_name

    # Per-type edge counts from the raw dataset rows.
    per_type_counts = {
        t: len(dataset1[t][-1])
        for t in ("transactions", "relationships", "referrals")
    }
    expected_num_edges = sum(per_type_counts.values())

    assert pG.get_num_edges() == expected_num_edges
    for type_name, count in per_type_counts.items():
        assert pG.get_num_edges(type_name) == count
    assert pG.get_num_edges("unknown_type") == 0

    # extract_subgraph() should return a directed Graph object with additional
    # meta-data, which includes edge IDs.
    G = pG.extract_subgraph(create_using=DiGraph_inst, check_multi_edges=False)

    # G.edge_data should be set to a DataFrame with rows for each graph edge,
    # with IDs covering 0..expected_num_edges-1.
    assert len(G.edge_data) == expected_num_edges
    edge_ids = sorted(G.edge_data[edge_id_col].values)
    assert edge_ids[0] == 0
    assert edge_ids[-1] == expected_num_edges - 1
@pytest.mark.sg
def test_annotate_dataframe(dataset1_PropertyGraph):
    """
    FIXME: Add tests for:
    properties list
    properties list with 1 or more bad props
    copy=False
    invalid args raise correct exceptions
    """
    (pG, data) = dataset1_PropertyGraph

    # Restrict the subgraph to well-reviewed referral edges only.
    selection = pG.select_edges("(_TYPE_ == 'referrals') & (stars > 3)")
    G = pG.extract_subgraph(selection=selection, create_using=DiGraph_inst)

    # Use whichever DataFrame backend (cuDF or pandas) the fixture was built with.
    df_type = type(pG._edge_prop_dataframe)
    # Create an arbitrary DataFrame meant to represent an algo result,
    # containing vertex IDs present in pG.
    #
    # Drop duplicate edges since actual results from a Graph object would not
    # have them.
    (srcs, dsts, mids, stars) = zip(*(dataset1["referrals"][1]))
    algo_result = df_type({"from": srcs, "to": dsts, "result": range(len(srcs))})
    algo_result.drop_duplicates(subset=["from", "to"], inplace=True, ignore_index=True)

    new_algo_result = pG.annotate_dataframe(
        algo_result, G, edge_vertex_col_names=("from", "to")
    )
    expected_algo_result = df_type(
        {
            "from": srcs,
            "to": dsts,
            "result": range(len(srcs)),
            "merchant_id": mids,
            "stars": stars,
        }
    )
    # The integer dtypes of annotated properties are nullable integer dtypes,
    # so convert for proper comparison.
    expected_algo_result["merchant_id"] = expected_algo_result["merchant_id"].astype(
        "Int64"
    )
    expected_algo_result["stars"] = expected_algo_result["stars"].astype("Int64")
    expected_algo_result.drop_duplicates(
        subset=["from", "to"], inplace=True, ignore_index=True
    )
    # Pick the series comparator that matches the DataFrame backend.
    if df_type is cudf.DataFrame:
        ase = assert_series_equal
    else:
        ase = pd.testing.assert_series_equal
    # For now, the result will include extra columns from edge types not
    # included in the df being annotated, so just check for known columns.
    for col in ["from", "to", "result", "merchant_id", "stars"]:
        ase(new_algo_result[col], expected_algo_result[col])
@pytest.mark.sg
def test_different_vertex_edge_input_dataframe_types():
    """
    Ensures that a PropertyGraph initialized with one DataFrame type cannot be
    extended with another.
    """
    from cugraph.experimental import PropertyGraph

    gdf = cudf.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

    # Vertex data first (cuDF), edge data with the other backend must raise.
    pG = PropertyGraph()
    pG.add_vertex_data(gdf, type_name="foo", vertex_col_name="a")
    with pytest.raises(TypeError):
        pG.add_edge_data(pdf, type_name="bar", vertex_col_names=("a", "b"))

    # Vertex data first (pandas), then a cuDF edge addition must raise.
    pG = PropertyGraph()
    pG.add_vertex_data(pdf, type_name="foo", vertex_col_name="a")
    with pytest.raises(TypeError):
        pG.add_edge_data(gdf, type_name="bar", vertex_col_names=("a", "b"))

    # Different order: edge data first, vertex data second.
    pG = PropertyGraph()
    pG.add_edge_data(gdf, type_name="bar", vertex_col_names=("a", "b"))
    with pytest.raises(TypeError):
        pG.add_vertex_data(pdf, type_name="foo", vertex_col_name="a")

    # Same API call, different backend types.
    pG = PropertyGraph()
    pG.add_vertex_data(gdf, type_name="foo", vertex_col_name="a")
    with pytest.raises(TypeError):
        pG.add_vertex_data(pdf, type_name="foo", vertex_col_name="a")

    pG = PropertyGraph()
    pG.add_edge_data(gdf, type_name="bar", vertex_col_names=("a", "b"))
    with pytest.raises(TypeError):
        pG.add_edge_data(pdf, type_name="bar", vertex_col_names=("a", "b"))
@pytest.mark.sg
def test_get_vertices(dataset1_PropertyGraph):
    """
    Test that get_vertices() returns the correct set of vertices without
    duplicates.
    """
    (pG, data) = dataset1_PropertyGraph
    (
        merchants,
        users,
        taxpayers,
        transactions,
        relationships,
        referrals,
    ) = dataset1.values()

    # Vertex IDs are the first field of every row in the vertex tables.
    expected_vertices = {
        row[0] for table in (merchants, users, taxpayers) for row in table[1]
    }
    assert sorted(pG.get_vertices().values) == sorted(expected_vertices)
@pytest.mark.sg
def test_get_edges(dataset1_PropertyGraph):
    """
    Test that get_edges() returns the correct set of edges (as src/dst
    columns).
    """
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    (
        merchants,
        users,
        taxpayers,
        transactions,
        relationships,
        referrals,
    ) = dataset1.values()

    # src/dst are the first two fields of every edge-table row.
    expected_edges = [
        (row[0], row[1])
        for table in (transactions, relationships, referrals)
        for row in table[1]
    ]
    actual_edges = pG.edges
    assert len(actual_edges) == len(expected_edges)

    scn = PropertyGraph.src_col_name
    dcn = PropertyGraph.dst_col_name
    for i in range(len(expected_edges)):
        edge = (actual_edges[scn].iloc[i], actual_edges[dcn].iloc[i])
        assert edge in expected_edges
@pytest.mark.sg
def test_property_names_attrs(dataset1_PropertyGraph):
    """
    Ensure the correct number of user-visible properties for vertices and edges
    are returned. This should exclude the internal bookkeeping properties.
    """
    (pG, data) = dataset1_PropertyGraph

    # _VERTEX_ columns: "merchant_id", "user_id"
    expected_vert_prop_names = [
        "merchant_location",
        "merchant_size",
        "merchant_sales",
        "merchant_num_employees",
        "user_location",
        "merchant_name",
        "vertical",
    ]
    # _SRC_ and _DST_ columns: "user_id", "user_id_1", "user_id_2"
    # Note that "merchant_id" is a property in for type "transactions"
    expected_edge_prop_names = [
        "merchant_id",
        "volume",
        "time",
        "card_num",
        "card_type",
        "relationship_type",
        "stars",
    ]

    # Extracting a subgraph with weights has/had a side-effect of adding a
    # weight column, so call extract_subgraph() to ensure the internal weight
    # column name is not present.
    pG.extract_subgraph(default_edge_weight=1.0)

    assert sorted(pG.vertex_property_names) == sorted(expected_vert_prop_names)
    assert sorted(pG.edge_property_names) == sorted(expected_edge_prop_names)
@pytest.mark.sg
@pytest.mark.skip(reason="unfinished")
def test_extract_subgraph_with_vertex_ids():
    """
    FIXME: add a PropertyGraph API that makes it easy to support the common use
    case of extracting a subgraph containing only specific vertex IDs. This is
    currently done in the bench_extract_subgraph_for_* tests below, but could
    be made easier for users to do.
    """
    # Placeholder body: the test is skipped until the convenience API exists.
    raise NotImplementedError
@pytest.mark.sg
def test_get_data_empty_graphs():
    """
    Ensures that calls to pG.get_*_data() on an empty pG are handled correctly.
    """
    from cugraph.experimental import PropertyGraph

    pG = PropertyGraph()
    # Both getters return None on an empty graph, with or without IDs.
    for getter in (pG.get_vertex_data, pG.get_edge_data):
        assert getter() is None
        assert getter([0, 1, 2]) is None
@pytest.mark.sg
@pytest.mark.parametrize("prev_id_column", [None, "prev_id"])
def test_renumber_vertices_by_type(dataset1_PropertyGraph, prev_id_column):
    """Renumber vertex IDs so they are contiguous and grouped by vertex type."""
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    # prev_id_column must not collide with an existing property column.
    with pytest.raises(ValueError, match="existing column"):
        pG.renumber_vertices_by_type("merchant_size")
    df_id_ranges = pG.renumber_vertices_by_type(prev_id_column)
    expected = {
        "merchants": [0, 4],  # stop is inclusive
        "users": [5, 8],
    }
    for key, (start, stop) in expected.items():
        assert df_id_ranges.loc[key, "start"] == start
        assert df_id_ranges.loc[key, "stop"] == stop
        df = pG.get_vertex_data(types=[key])
        if isinstance(df, cudf.DataFrame):
            df = df.to_pandas()
        assert len(df) == stop - start + 1
        assert df["_VERTEX_"].tolist() == list(range(start, stop + 1))
        if prev_id_column is not None:
            # The original vertex IDs must be preserved in the requested column.
            cur = df[prev_id_column].sort_values()
            # NOTE: rebinding `expected` here shadows the dict above; this is
            # safe because the .items() iterator holds the original object.
            expected = sorted(x for x, *args in data[key][1])
            assert cur.tolist() == expected
    # Make sure we renumber vertex IDs in edge data too
    df = pG.get_edge_data()
    assert 0 <= df[pG.src_col_name].min() < df[pG.src_col_name].max() < 9
    assert 0 <= df[pG.dst_col_name].min() < df[pG.dst_col_name].max() < 9
    # Renumbering an empty graph is a no-op that returns None.
    empty_pG = PropertyGraph()
    assert empty_pG.renumber_vertices_by_type(prev_id_column) is None
    # Test when vertex IDs only exist in edge data
    df = type(df)({"src": [99998], "dst": [99999]})
    empty_pG.add_edge_data(df, ["src", "dst"])
    with pytest.raises(NotImplementedError, match="only exist in edge"):
        empty_pG.renumber_vertices_by_type(prev_id_column)
@pytest.mark.sg
@pytest.mark.parametrize("prev_id_column", [None, "prev_id"])
def test_renumber_edges_by_type(dataset1_PropertyGraph, prev_id_column):
    """Renumber edge IDs so they are contiguous and grouped by edge type."""
    from cugraph.experimental import PropertyGraph

    (pG, data) = dataset1_PropertyGraph
    # prev_id_column must not collide with an existing property column.
    with pytest.raises(ValueError, match="existing column"):
        pG.renumber_edges_by_type("time")
    df_id_ranges = pG.renumber_edges_by_type(prev_id_column)
    expected = {
        "transactions": [0, 3],  # stop is inclusive
        "relationships": [4, 7],
        "referrals": [8, 13],
        # Results are no longer alphabetical b/c use of categoricals for types
        # "referrals": [0, 5],  # stop is inclusive
        # "relationships": [6, 9],
        # "transactions": [10, 13],
    }
    for key, (start, stop) in expected.items():
        assert df_id_ranges.loc[key, "start"] == start
        assert df_id_ranges.loc[key, "stop"] == stop
        df = pG.get_edge_data(types=[key])
        if isinstance(df, cudf.DataFrame):
            df = df.to_pandas()
        assert len(df) == stop - start + 1
        assert df[pG.edge_id_col_name].tolist() == list(range(start, stop + 1))
        if prev_id_column is not None:
            # Previous edge IDs should be carried over into the named column.
            assert prev_id_column in df.columns
    # Renumbering an empty graph is a no-op that returns None.
    empty_pG = PropertyGraph()
    assert empty_pG.renumber_edges_by_type(prev_id_column) is None
@pytest.mark.sg
def test_renumber_vertices_edges_dtypes():
    """
    Renumbering by type should preserve the int32 index dtype of the original
    vertex and edge IDs.
    """
    from cugraph.experimental import PropertyGraph

    edges = cudf.DataFrame(
        {
            "src": cp.array([0, 5, 2, 3, 4, 3], dtype="int32"),
            "dst": cp.array([2, 4, 4, 5, 1, 2], dtype="int32"),
            "eid": cp.array([8, 7, 5, 2, 9, 1], dtype="int32"),
        }
    )
    vertices = cudf.DataFrame(
        {
            "v": cp.array([0, 1, 2, 3, 4, 5], dtype="int32"),
            "p": [5, 10, 15, 20, 25, 30],
        }
    )
    pG = PropertyGraph()
    pG.add_vertex_data(vertices, vertex_col_name="v", property_columns=["p"])
    pG.add_edge_data(edges, vertex_col_names=["src", "dst"], edge_id_col_name="eid")

    pG.renumber_vertices_by_type()
    assert pG.get_vertex_data().index.dtype == cp.int32
    pG.renumber_edges_by_type()
    assert pG.get_edge_data().index.dtype == cp.int32
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_add_data_noncontiguous(df_type):
    """Add edge and vertex data one type at a time from boolean-masked
    (non-contiguous) slices of a single DataFrame."""
    from cugraph.experimental import PropertyGraph

    df = df_type(
        {
            "src": [0, 0, 1, 2, 2, 3, 3, 1, 2, 4],
            "dst": [1, 2, 4, 3, 3, 1, 2, 4, 4, 3],
            "edge_type": [
                "pig",
                "dog",
                "cat",
                "pig",
                "cat",
                "pig",
                "dog",
                "pig",
                "cat",
                "dog",
            ],
        }
    )
    counts = df["edge_type"].value_counts()

    # Add each edge type from a non-contiguous slice of df.
    pG = PropertyGraph()
    for edge_type in ["cat", "dog", "pig"]:
        pG.add_edge_data(
            df[df.edge_type == edge_type],
            vertex_col_names=["src", "dst"],
            type_name=edge_type,
        )
    # Pick the series comparator that matches the DataFrame backend.
    if df_type is cudf.DataFrame:
        ase = assert_series_equal
    else:
        ase = pd.testing.assert_series_equal
    for edge_type in ["cat", "dog", "pig"]:
        cur_df = pG.get_edge_data(types=edge_type)
        assert len(cur_df) == counts[edge_type]
        ase(
            cur_df[pG.type_col_name].astype(str),
            cur_df["edge_type"],
            check_names=False,
        )

    # Derive distinct vertex IDs and repeat the exercise with vertex data.
    df["vertex"] = (
        100 * df["src"]
        + df["dst"]
        + df["edge_type"].map({"pig": 0, "dog": 10, "cat": 20})
    )
    pG = PropertyGraph()
    for edge_type in ["cat", "dog", "pig"]:
        pG.add_vertex_data(
            df[df.edge_type == edge_type], vertex_col_name="vertex", type_name=edge_type
        )
    for edge_type in ["cat", "dog", "pig"]:
        cur_df = pG.get_vertex_data(types=edge_type)
        assert len(cur_df) == counts[edge_type]
        ase(
            cur_df[pG.type_col_name].astype(str),
            cur_df["edge_type"],
            check_names=False,
        )
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_vertex_ids_different_type(df_type):
    """Getting the number of vertices requires combining vertex ids from
    multiple columns.
    This test ensures combining these columns works even if they are different types.
    """
    from cugraph.experimental import PropertyGraph

    series_type = pd.Series if df_type is pd.DataFrame else cudf.Series

    pg = PropertyGraph()
    node_df = df_type()
    node_df["node_id"] = series_type([0, 1, 2]).astype("int32")
    pg.add_vertex_data(node_df, "node_id", type_name="_N")

    edge_df = df_type()
    edge_df["src"] = series_type([0, 1, 2]).astype("int32")
    # Deliberately a different dtype than src.
    edge_df["dst"] = series_type([0, 1, 2]).astype("int64")
    pg.add_edge_data(edge_df, ["src", "dst"], type_name="_E")

    assert pg.get_num_vertices() == 3
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_vertex_vector_property(df_type):
    """Exercise vector_properties / vertex_vector_property_to_array: packing
    scalar columns into vector columns and unpacking them back to arrays."""
    from cugraph.experimental import PropertyGraph

    (
        merchants,
        users,
        taxpayers,
        transactions,
        relationships,
        referrals,
    ) = dataset1.values()
    # Pick array helpers matching the DataFrame backend (cupy vs numpy).
    if df_type is cudf.DataFrame:
        assert_array_equal = cp.testing.assert_array_equal
        zeros = cp.zeros
    else:
        assert_array_equal = np.testing.assert_array_equal
        zeros = np.zeros
    pG = PropertyGraph()
    merchants_df = df_type(columns=merchants[0], data=merchants[1])
    with pytest.raises(ValueError):
        # Column doesn't exist
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": ["merchant_location", "BAD_NAME"]},
        )
    with pytest.raises(ValueError):
        # Using reserved name
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={
                pG.type_col_name: ["merchant_location", "merchant_size"]
            },
        )
    with pytest.raises(TypeError):
        # String value invalid
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": "merchant_location"},
        )
    with pytest.raises(ValueError):
        # Length-0 vector not allowed
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": []},
        )
    # Valid: pack three scalar columns into a single vector property "vec1".
    pG.add_vertex_data(
        merchants_df,
        type_name="merchants",
        vertex_col_name="merchant_id",
        vector_properties={
            "vec1": ["merchant_location", "merchant_size", "merchant_num_employees"]
        },
    )
    df = pG.get_vertex_data()
    # The packed scalar columns are replaced by the single "vec1" column.
    expected_columns = {
        pG.vertex_col_name,
        pG.type_col_name,
        "merchant_sales",
        "merchant_name",
        "vec1",
    }
    assert set(df.columns) == expected_columns
    expected = merchants_df[
        ["merchant_location", "merchant_size", "merchant_num_employees"]
    ].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    vec1 = pG.vertex_vector_property_to_array(df, "vec1")
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    vec1 = pG.vertex_vector_property_to_array(df, "vec1", missing="error")
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    with pytest.raises(ValueError):
        pG.vertex_vector_property_to_array(df, "BAD_NAME")
    users_df = df_type(columns=users[0], data=users[1])
    with pytest.raises(ValueError):
        # Length doesn't match existing vector
        pG.add_vertex_data(
            users_df,
            type_name="users",
            vertex_col_name="user_id",
            property_columns=["vertical"],
            vector_properties={"vec1": ["user_location", "vertical"]},
        )
    with pytest.raises(ValueError):
        # Can't assign property to existing vector column
        pG.add_vertex_data(
            users_df.assign(vec1=users_df["user_id"]),
            type_name="users",
            vertex_col_name="user_id",
            property_columns=["vec1"],
        )
    # Valid: a second vector property ("vec2") for a different vertex type.
    pG.add_vertex_data(
        users_df,
        type_name="users",
        vertex_col_name="user_id",
        property_columns=["vertical"],
        vector_properties={"vec2": ["user_location", "vertical"]},
    )
    expected_columns.update({"vec2", "vertical"})
    df = pG.get_vertex_data()
    assert set(df.columns) == expected_columns
    vec1 = pG.vertex_vector_property_to_array(df, "vec1")
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    with pytest.raises(RuntimeError):
        # "users" rows have no vec1 values, so missing="error" must raise.
        pG.vertex_vector_property_to_array(df, "vec1", missing="error")
    # vector_property=<name> packs all property columns into one vector.
    pGusers = PropertyGraph()
    pGusers.add_vertex_data(
        users_df,
        type_name="users",
        vertex_col_name="user_id",
        vector_property="vec3",
    )
    vec2 = pG.vertex_vector_property_to_array(df, "vec2")
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    df2 = pGusers.get_vertex_data()
    assert set(df2.columns) == {pG.vertex_col_name, pG.type_col_name, "vec3"}
    vec3 = pGusers.vertex_vector_property_to_array(df2, "vec3")
    vec3 = vec3[np.lexsort(vec3.T)]  # may be jumbled, so sort
    assert_array_equal(vec2, vec3)
    # A scalar fill value of 0 expands rows missing vec1 to [0, 0, 0];
    # the zeros((4, 3), ...) matches the 4 "users" vertices lacking vec1.
    vec1filled = pG.vertex_vector_property_to_array(df, "vec1", 0, missing="error")
    vec1filled = vec1filled[np.lexsort(vec1filled.T)]  # may be jumbled, so sort
    expectedfilled = np.concatenate([zeros((4, 3), int), expected])
    assert_array_equal(expectedfilled, vec1filled)
    vec1filled = pG.vertex_vector_property_to_array(df, "vec1", [0, 0, 0])
    vec1filled = vec1filled[np.lexsort(vec1filled.T)]  # may be jumbled, so sort
    assert_array_equal(expectedfilled, vec1filled)
    with pytest.raises(ValueError, match="expected 3"):
        # Fill value length must match the vector length.
        pG.vertex_vector_property_to_array(df, "vec1", [0, 0])
    vec2 = pG.vertex_vector_property_to_array(df, "vec2")
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    expected = users_df[["user_location", "vertical"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec2)
    with pytest.raises(TypeError):
        # Column is wrong type to be a vector
        pG.vertex_vector_property_to_array(
            df.rename(columns={"vec1": "vertical", "vertical": "vec1"}), "vec1"
        )
    with pytest.raises(ValueError):
        # Vector column doesn't exist in dataframe
        pG.vertex_vector_property_to_array(df.rename(columns={"vec1": "moved"}), "vec1")
    with pytest.raises(TypeError):
        # Bad type
        pG.vertex_vector_property_to_array(42, "vec1")
@pytest.mark.sg
@pytest.mark.parametrize("df_type", df_types, ids=df_type_id)
def test_edge_vector_property(df_type):
    """Exercise vector_properties / edge_vector_property_to_array for edges."""
    from cugraph.experimental import PropertyGraph

    # Pick the array comparator matching the DataFrame backend (cupy vs numpy).
    if df_type is cudf.DataFrame:
        assert_array_equal = cp.testing.assert_array_equal
    else:
        assert_array_equal = np.testing.assert_array_equal
    df1 = df_type(
        {
            "src": [0, 1],
            "dst": [1, 2],
            "feat_0": [1, 2],
            "feat_1": [10, 20],
            "feat_2": [10, 20],
        }
    )
    df2 = df_type(
        {
            "src": [2, 3],
            "dst": [1, 2],
            "feat_0": [0.5, 0.2],
            "feat_1": [1.5, 1.2],
        }
    )
    pG = PropertyGraph()
    pG.add_edge_data(
        df1, ("src", "dst"), vector_properties={"vec1": ["feat_0", "feat_1", "feat_2"]}
    )
    df = pG.get_edge_data()
    # The feat_* columns are packed into the single "vec1" column.
    expected_columns = {
        pG.edge_id_col_name,
        pG.src_col_name,
        pG.dst_col_name,
        pG.type_col_name,
        "vec1",
    }
    assert set(df.columns) == expected_columns
    expected = df1[["feat_0", "feat_1", "feat_2"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort

    # vector_property=<name> should yield the same vector as an explicit
    # vector_properties mapping over all feature columns.
    pGalt = PropertyGraph()
    pGalt.add_edge_data(df1, ("src", "dst"), vector_property="vec1")
    # BUGFIX: fetch the alternate graph's edge data. This was
    # `pG.get_edge_data()`, which made the (pGalt, dfalt) iteration below
    # re-test pG's dataframe instead of exercising pGalt's.
    dfalt = pGalt.get_edge_data()

    for cur_pG, cur_df in [(pG, df), (pGalt, dfalt)]:
        vec1 = cur_pG.edge_vector_property_to_array(cur_df, "vec1")
        vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
        assert_array_equal(vec1, expected)
        vec1 = cur_pG.edge_vector_property_to_array(cur_df, "vec1", missing="error")
        vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
        assert_array_equal(vec1, expected)

    # Add a second edge type with a different (shorter) vector property.
    pG.add_edge_data(
        df2, ("src", "dst"), vector_properties={"vec2": ["feat_0", "feat_1"]}
    )
    df = pG.get_edge_data()
    expected_columns.add("vec2")
    assert set(df.columns) == expected_columns
    expected = df2[["feat_0", "feat_1"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    vec2 = pG.edge_vector_property_to_array(df, "vec2")
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    assert_array_equal(vec2, expected)
    with pytest.raises(RuntimeError):
        # df1's edges have no vec2 values, so missing="error" must raise.
        pG.edge_vector_property_to_array(df, "vec2", missing="error")
@pytest.mark.sg
@pytest.mark.skip(reason="feature not implemented")
def test_single_csv_multi_vertex_edge_attrs():
    """
    Read an edgelist CSV that contains both edge and vertex attrs
    """
    # Placeholder: skipped until the combined vertex/edge-attr CSV feature exists.
    pass
@pytest.mark.sg
def test_fillna_vertices():
    """fillna_vertices() fills NaN vertex properties, leaving edge NaNs alone."""
    from cugraph.experimental import PropertyGraph

    df_edgelist = cudf.DataFrame(
        {
            "src": [0, 7, 2, 0, 1, 3, 1, 4, 5, 6],
            "dst": [1, 1, 1, 3, 2, 1, 6, 5, 6, 7],
            "val": [1, None, 2, None, 3, None, 4, None, 5, None],
        }
    )
    df_props = cudf.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5, 6, 7],
            "a": [0, 1, None, 2, None, 4, 1, 8],
            "b": [None, 1, None, 2, None, 3, 8, 9],
        }
    )
    pG = PropertyGraph()
    pG.add_edge_data(df_edgelist, vertex_col_names=["src", "dst"])
    pG.add_vertex_data(df_props, vertex_col_name="id")

    pG.fillna_vertices({"a": 2, "b": 3})

    # Vertex NaNs are gone; edge NaNs must remain untouched.
    assert not pG.get_vertex_data(columns=["a", "b"]).isna().any().any()
    assert pG.get_edge_data(columns=["val"]).isna().any().any()

    expected_a = [0, 1, 2, 2, 2, 4, 1, 8]
    expected_b = [3, 1, 3, 2, 3, 3, 8, 9]
    assert pG.get_vertex_data(columns=["a"])["a"].values_host.tolist() == expected_a
    assert pG.get_vertex_data(columns=["b"])["b"].values_host.tolist() == expected_b
@pytest.mark.sg
def test_fillna_edges():
    """fillna_edges() fills NaN edge properties, leaving vertex NaNs alone."""
    from cugraph.experimental import PropertyGraph

    df_edgelist = cudf.DataFrame(
        {
            "src": [0, 7, 2, 0, 1, 3, 1, 4, 5, 6],
            "dst": [1, 1, 1, 3, 2, 1, 6, 5, 6, 7],
            "val": [1, None, 2, None, 3, None, 4, None, 5, None],
        }
    )
    df_props = cudf.DataFrame(
        {
            "id": [0, 1, 2, 3, 4, 5, 6, 7],
            "a": [0, 1, None, 2, None, 4, 1, 8],
            "b": [None, 1, None, 2, None, 3, 8, 9],
        }
    )
    pG = PropertyGraph()
    pG.add_edge_data(df_edgelist, vertex_col_names=["src", "dst"])
    pG.add_vertex_data(df_props, vertex_col_name="id")

    pG.fillna_edges(2)

    # Edge NaNs are gone; vertex NaNs must remain untouched.
    assert not pG.get_edge_data(columns=["val"]).isna().any().any()
    assert pG.get_vertex_data(columns=["a", "b"]).isna().any().any()

    expected_val = [1, 2, 2, 2, 3, 2, 4, 2, 5, 2]
    assert pG.get_edge_data(columns=["val"])["val"].values_host.tolist() == expected_val
@pytest.mark.sg
def test_types_from_numerals():
    """Map numeric type codes back to their string vertex/edge type names."""
    from cugraph.experimental import PropertyGraph

    df_edgelist_cow = cudf.DataFrame(
        {"src": [0, 7, 2, 0, 1], "dst": [1, 1, 1, 3, 2], "val": [1, 3, 2, 3, 3]}
    )
    df_edgelist_pig = cudf.DataFrame(
        {"src": [3, 1, 4, 5, 6], "dst": [1, 6, 5, 6, 7], "val": [5, 4, 5, 5, 2]}
    )
    df_props_duck = cudf.DataFrame(
        {"id": [0, 1, 2, 3], "a": [0, 1, 6, 2], "b": [2, 1, 2, 2]}
    )
    df_props_goose = cudf.DataFrame(
        {"id": [4, 5, 6, 7], "a": [5, 4, 1, 8], "b": [2, 3, 8, 9]}
    )
    pG = PropertyGraph()
    pG.add_edge_data(df_edgelist_cow, vertex_col_names=["src", "dst"], type_name="cow")
    pG.add_edge_data(df_edgelist_pig, vertex_col_names=["src", "dst"], type_name="pig")
    pG.add_vertex_data(df_props_duck, vertex_col_name="id", type_name="duck")
    pG.add_vertex_data(df_props_goose, vertex_col_name="id", type_name="goose")

    vertex_numerals = cudf.Series([0, 1, 0, 0, 1, 0, 1, 1])
    expected_vertex_types = [
        "duck",
        "goose",
        "duck",
        "duck",
        "goose",
        "duck",
        "goose",
        "goose",
    ]
    assert (
        pG.vertex_types_from_numerals(vertex_numerals).values_host.tolist()
        == expected_vertex_types
    )

    edge_numerals = cudf.Series([1, 1, 0, 1, 1, 0, 0, 1, 1])
    expected_edge_types = [
        "pig",
        "pig",
        "cow",
        "pig",
        "pig",
        "cow",
        "cow",
        "pig",
        "pig",
    ]
    assert (
        pG.edge_types_from_numerals(edge_numerals).values_host.tolist()
        == expected_edge_types
    )
# =============================================================================
# Benchmarks
# =============================================================================
def bench_num_vertices(gpubenchmark, dataset1_PropertyGraph):
    """Benchmark get_num_vertices() and sanity-check its result."""
    (graph, _) = dataset1_PropertyGraph
    assert gpubenchmark(graph.get_num_vertices) == 9
def bench_get_vertices(gpubenchmark, dataset1_PropertyGraph):
    """Benchmark get_vertices()."""
    (graph, _) = dataset1_PropertyGraph
    gpubenchmark(graph.get_vertices)
def bench_extract_subgraph_for_cyber(gpubenchmark, cyber_PropertyGraph):
    """Benchmark subgraph extraction for a small set of cyber IP vertices."""
    from cugraph.experimental import PropertyGraph

    graph = cyber_PropertyGraph
    src_col = PropertyGraph.src_col_name
    dst_col = PropertyGraph.dst_col_name

    # Create a Graph containing only specific src or dst vertices.
    ip_verts = ["10.40.182.3", "10.40.182.255", "59.166.0.9", "59.166.0.8"]
    edge_selection = graph.select_edges(
        f"{src_col}.isin({ip_verts}) | {dst_col}.isin({ip_verts})"
    )
    gpubenchmark(
        graph.extract_subgraph,
        create_using=cugraph.Graph(directed=True),
        selection=edge_selection,
        default_edge_weight=1.0,
        check_multi_edges=False,
    )
def bench_extract_subgraph_for_cyber_detect_duplicate_edges(
    gpubenchmark, cyber_PropertyGraph
):
    """Benchmark the multi-edge check rejecting a duplicate-edge selection."""
    from cugraph.experimental import PropertyGraph

    graph = cyber_PropertyGraph
    src_col = PropertyGraph.src_col_name
    dst_col = PropertyGraph.dst_col_name

    # Create a Graph containing only specific src or dst vertices.
    ip_verts = ["10.40.182.3", "10.40.182.255", "59.166.0.9", "59.166.0.8"]
    edge_selection = graph.select_edges(
        f"{src_col}.isin({ip_verts}) | {dst_col}.isin({ip_verts})"
    )

    def run_and_expect_raise():
        # check_multi_edges=True must reject the duplicate edges.
        with pytest.raises(RuntimeError):
            graph.extract_subgraph(
                create_using=cugraph.Graph(directed=True),
                selection=edge_selection,
                default_edge_weight=1.0,
                check_multi_edges=True,
            )

    gpubenchmark(run_and_expect_raise)
def bench_extract_subgraph_for_rmat(gpubenchmark, rmat_PropertyGraph):
    """Benchmark subgraph extraction on the RMAT property graph."""
    from cugraph.experimental import PropertyGraph

    (graph, generated_df) = rmat_PropertyGraph
    src_col = PropertyGraph.src_col_name
    dst_col = PropertyGraph.dst_col_name

    # Every 10th src vertex from the first 10k generated rows.
    verts = [generated_df["src"].iloc[i] for i in range(0, 10000, 10)]
    edge_selection = graph.select_edges(
        f"{src_col}.isin({verts}) | {dst_col}.isin({verts})"
    )
    gpubenchmark(
        graph.extract_subgraph,
        create_using=cugraph.Graph(directed=True),
        selection=edge_selection,
        default_edge_weight=1.0,
        check_multi_edges=False,
    )
@pytest.mark.slow
@pytest.mark.parametrize("n_rows", [15_000_000, 30_000_000, 60_000_000, 120_000_000])
def bench_add_edge_data(gpubenchmark, n_rows):
    """Benchmark add_edge_data() on a synthetic n_rows-edge graph."""
    from cugraph.experimental import PropertyGraph

    def build_graph():
        graph = PropertyGraph()
        srcs = cp.arange(n_rows)
        dsts = srcs - 1
        edges = cudf.DataFrame({"src": srcs, "dst": dsts})
        graph.add_edge_data(edges, ["src", "dst"], type_name="('_N', '_E', '_N')")

    gpubenchmark(build_graph)
# This test runs for *minutes* with the current implementation, and since
# benchmarking can call it multiple times per run, the overall time for this
# test can be ~20 minutes.
@pytest.mark.slow
def bench_extract_subgraph_for_rmat_detect_duplicate_edges(
    gpubenchmark, rmat_PropertyGraph
):
    """Benchmark the multi-edge check rejecting a duplicate-edge RMAT selection."""
    from cugraph.experimental import PropertyGraph

    (graph, generated_df) = rmat_PropertyGraph
    src_col = PropertyGraph.src_col_name
    dst_col = PropertyGraph.dst_col_name

    # Every 10th src vertex from the first 10k generated rows.
    verts = [generated_df["src"].iloc[i] for i in range(0, 10000, 10)]
    edge_selection = graph.select_edges(
        f"{src_col}.isin({verts}) | {dst_col}.isin({verts})"
    )

    def run_and_expect_raise():
        # check_multi_edges=True must reject the duplicate edges.
        with pytest.raises(RuntimeError):
            graph.extract_subgraph(
                create_using=cugraph.Graph(directed=True),
                selection=edge_selection,
                default_edge_weight=1.0,
                check_multi_edges=True,
            )

    gpubenchmark(run_and_expect_raise)
@pytest.mark.slow
@pytest.mark.parametrize("N", [1, 3, 10, 30])
def bench_add_edges_cyber(gpubenchmark, N):
    """Benchmark adding the cyber edgelist to a PropertyGraph in N chunks."""
    from cugraph.experimental import PropertyGraph

    # Partition the dataframe to add in chunks.
    cyber_df = cyber.get_edgelist()
    chunk_len = (len(cyber_df) + N - 1) // N  # ceiling division
    chunks = [cyber_df.iloc[i * chunk_len : (i + 1) * chunk_len] for i in range(N)]

    def add_all_chunks():
        graph = PropertyGraph()
        for chunk_df in chunks:
            graph.add_edge_data(chunk_df, ("srcip", "dstip"))
        # All rows must survive the chunked insertion.
        assert len(graph.get_edge_data()) == len(cyber_df)

    gpubenchmark(add_all_chunks)
# @pytest.mark.slow
@pytest.mark.parametrize("n_rows", [10_000, 100_000, 1_000_000, 10_000_000])
@pytest.mark.parametrize("n_feats", [32, 64, 128])
def bench_add_vector_features(gpubenchmark, n_rows, n_feats):
    """Benchmark add_edge_data() when packing n_feats columns into one vector."""
    from cugraph.experimental import PropertyGraph

    edges = cudf.DataFrame(
        {
            "src": cp.arange(0, n_rows, dtype=cp.int32),
            "dst": cp.arange(0, n_rows, dtype=cp.int32) + 1,
        }
    )
    for i in range(n_feats):
        edges[f"feat_{i}"] = cp.ones(len(edges), dtype=cp.int32)
    vector_properties = {"feat": [f"feat_{i}" for i in range(n_feats)]}

    def add_edges():
        graph = PropertyGraph()
        graph.add_edge_data(
            edges, vertex_col_names=["src", "dst"], vector_properties=vector_properties
        )

    gpubenchmark(add_edges)
# @pytest.mark.slow
@pytest.mark.parametrize("n_rows", [1_000_000])
@pytest.mark.parametrize("n_feats", [128])
def bench_get_vector_features_cp_array(benchmark, n_rows, n_feats):
    """Benchmark get_edge_data() with a cupy-array edge_ids argument."""
    from cugraph.experimental import PropertyGraph

    edges = cudf.DataFrame(
        {
            "src": cp.arange(0, n_rows, dtype=cp.int32),
            "dst": cp.arange(0, n_rows, dtype=cp.int32) + 1,
        }
    )
    for i in range(n_feats):
        edges[f"feat_{i}"] = cp.ones(len(edges), dtype=cp.int32)
    vector_properties = {"feat": [f"feat_{i}" for i in range(n_feats)]}

    graph = PropertyGraph()
    graph.add_edge_data(
        edges, vertex_col_names=["src", "dst"], vector_properties=vector_properties
    )
    benchmark(graph.get_edge_data, edge_ids=cp.arange(0, 100_000))
# @pytest.mark.slow
@pytest.mark.parametrize("n_rows", [1_000_000])
@pytest.mark.parametrize("n_feats", [128])
def bench_get_vector_features_cudf_series(benchmark, n_rows, n_feats):
    """Benchmark get_edge_data() with a cudf.Series edge_ids argument."""
    from cugraph.experimental import PropertyGraph

    edges = cudf.DataFrame(
        {
            "src": cp.arange(0, n_rows, dtype=cp.int32),
            "dst": cp.arange(0, n_rows, dtype=cp.int32) + 1,
        }
    )
    for i in range(n_feats):
        edges[f"feat_{i}"] = cp.ones(len(edges), dtype=cp.int32)
    vector_properties = {"feat": [f"feat_{i}" for i in range(n_feats)]}

    graph = PropertyGraph()
    graph.add_edge_data(
        edges, vertex_col_names=["src", "dst"], vector_properties=vector_properties
    )
    benchmark(graph.get_edge_data, edge_ids=cudf.Series(cp.arange(0, 100_000)))
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage_wholegraph.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
from cugraph.gnn import FeatureStore
from cugraph.utilities.utils import import_optional, MissingModule
pylibwholegraph = import_optional("pylibwholegraph")
wmb = import_optional("pylibwholegraph.binding.wholememory_binding")
torch = import_optional("torch")
def runtest(world_rank: int, world_size: int):
    """Per-process body: store features via a WholeGraph-backed FeatureStore
    and verify a random gather round-trips exactly."""
    from pylibwholegraph.torch.initialize import init_torch_env_and_create_wm_comm

    # Set up the per-rank torch/WholeMemory environment and communicator.
    wm_comm, _ = init_torch_env_and_create_wm_comm(
        world_rank,
        world_size,
        world_rank,
        world_size,
    )
    wm_comm = wm_comm.wmb_comm

    # Deterministic feature matrix: 10_000 rows x 10 cols of float64.
    generator = np.random.default_rng(62)
    arr = (
        generator.integers(low=0, high=100, size=100_000)
        .reshape(10_000, -1)
        .astype("float64")
    )

    fs = FeatureStore(backend="wholegraph")
    fs.add_data(arr, "type2", "feat1")
    # Ensure every rank has finished writing before any rank reads.
    wm_comm.barrier()

    indices_to_fetch = np.random.randint(low=0, high=len(arr), size=1024)
    output_fs = fs.get_data(indices_to_fetch, type_name="type2", feat_name="feat1")
    # The wholegraph backend returns results as CUDA torch tensors.
    assert isinstance(output_fs, torch.Tensor)
    assert output_fs.is_cuda
    expected = arr[indices_to_fetch]
    np.testing.assert_array_equal(output_fs.cpu().numpy(), expected)
    wmb.finalize()
@pytest.mark.sg
@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available")
@pytest.mark.skipif(
    isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
)
def test_feature_storage_wholegraph_backend():
    """Single-process run of the WholeGraph FeatureStore round-trip."""
    from pylibwholegraph.utils.multiprocess import multiprocess_run

    num_gpus = wmb.fork_get_gpu_count()
    print("gpu count:", num_gpus)
    assert num_gpus > 0

    # Exercise the round-trip in a single worker process.
    multiprocess_run(1, runtest)
@pytest.mark.mg
@pytest.mark.skipif(isinstance(torch, MissingModule), reason="torch not available")
@pytest.mark.skipif(
    isinstance(pylibwholegraph, MissingModule), reason="wholegraph not available"
)
def test_feature_storage_wholegraph_backend_mg():
    """Multi-GPU run: one worker process per detected GPU."""
    from pylibwholegraph.utils.multiprocess import multiprocess_run

    num_gpus = wmb.fork_get_gpu_count()
    print("gpu count:", num_gpus)
    assert num_gpus > 0

    multiprocess_run(num_gpus, runtest)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/data_store/test_gnn_feat_storage.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import cudf
from cugraph.gnn import FeatureStore
@pytest.mark.sg
def test_feature_storage_from_numpy():
    """
    Store 1D and 2D numpy arrays in a numpy-backed FeatureStore and verify
    that indexed fetches return the same rows as direct array indexing.
    """
    ar1 = np.random.randint(low=0, high=100, size=100_000)
    ar2 = np.random.randint(low=0, high=100, size=100_000)
    ar3 = np.random.randint(low=0, high=100, size=100_000).reshape(10_000, -1)

    fs = FeatureStore(backend="numpy")
    fs.add_data(ar1, "type1", "feat1")
    fs.add_data(ar2, "type1", "feat2")
    fs.add_data(ar3, "type2", "feat1")

    # Every stored array must round-trip through get_data() for random
    # indices.  (The original repeated this stanza verbatim three times.)
    for ar, type_name, feat_name in (
        (ar1, "type1", "feat1"),
        (ar2, "type1", "feat2"),
        (ar3, "type2", "feat1"),
    ):
        indices_to_fetch = np.random.randint(low=0, high=len(ar), size=1024)
        output_fs = fs.get_data(
            indices_to_fetch, type_name=type_name, feat_name=feat_name
        )
        np.testing.assert_array_equal(output_fs, ar[indices_to_fetch])
@pytest.mark.sg
def test_feature_storage_from_cudf():
    """
    Store cuDF DataFrames (built from 2D numpy arrays) in a numpy-backed
    FeatureStore and verify that indexed fetches match indexing the source
    numpy arrays directly.
    """
    # Three independent 10_000 x 10 arrays; keep the numpy originals so the
    # expected values can be computed without cuDF.
    arrays = [
        np.random.randint(low=0, high=100, size=100_000).reshape(10_000, -1)
        for _ in range(3)
    ]
    keys = (("type1", "feat1"), ("type1", "feat2"), ("type2", "feat1"))

    fs = FeatureStore(backend="numpy")
    for ar, (type_name, feat_name) in zip(arrays, keys):
        fs.add_data(cudf.DataFrame(ar), type_name, feat_name)

    # Every stored frame must round-trip through get_data() for random
    # indices.  (The original repeated this stanza verbatim three times.)
    for ar, (type_name, feat_name) in zip(arrays, keys):
        indices_to_fetch = np.random.randint(low=0, high=len(ar), size=1024)
        output_fs = fs.get_data(
            indices_to_fetch, type_name=type_name, feat_name=feat_name
        )
        np.testing.assert_array_equal(output_fs, ar[indices_to_fetch])
@pytest.mark.sg
def test_feature_storage_pytorch_backend():
    """
    Store numpy arrays in a torch-backed FeatureStore and verify fetches
    come back as torch.Tensor with values matching direct array indexing.
    Skips when pytorch is not installed.
    """
    try:
        import torch
    except ModuleNotFoundError:
        pytest.skip("pytorch not available")

    ar1 = np.random.randint(low=0, high=100, size=100_000)
    ar2 = np.random.randint(low=0, high=100, size=100_000)
    ar3 = np.random.randint(low=0, high=100, size=100_000).reshape(-1, 10)

    fs = FeatureStore(backend="torch")
    fs.add_data(ar1, "type1", "feat1")
    fs.add_data(ar2, "type1", "feat2")
    fs.add_data(ar3, "type2", "feat1")

    # Every stored array must come back as a Tensor with the right values.
    # (The original repeated this stanza verbatim three times.)
    for ar, type_name, feat_name in (
        (ar1, "type1", "feat1"),
        (ar2, "type1", "feat2"),
        (ar3, "type2", "feat1"),
    ):
        indices_to_fetch = np.random.randint(low=0, high=len(ar), size=1024)
        output_fs = fs.get_data(
            indices_to_fetch, type_name=type_name, feat_name=feat_name
        )
        assert isinstance(output_fs, torch.Tensor)
        np.testing.assert_array_equal(output_fs.numpy(), ar[indices_to_fetch])
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/data_store/test_property_graph_mg.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import pandas as pd
import numpy as np
import cudf
import cugraph
import dask_cudf
import cupy as cp
import cugraph.dask as dcg
from cupy.testing import assert_array_equal
from cudf.testing import assert_frame_equal, assert_series_equal
from pylibcugraph.testing.utils import gen_fixture_params_product
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.datasets import cyber, netscience
# If the rapids-pytest-benchmark plugin is installed, the "gpubenchmark"
# fixture will be available automatically. Check that this fixture is available
# by trying to import rapids_pytest_benchmark, and if that fails, set
# "gpubenchmark" to the standard "benchmark" fixture provided by
# pytest-benchmark.
try:
import rapids_pytest_benchmark # noqa: F401
except ImportError:
import pytest_benchmark
gpubenchmark = pytest_benchmark.plugin.benchmark
def type_is_categorical(pG):
    """
    Return True if the type column of each property DataFrame present on the
    PropertyGraph uses the "category" dtype; a missing DataFrame (None)
    trivially satisfies the check.
    """

    def _type_col_ok(df):
        # Short-circuit on None exactly like the original or-expression did.
        return df is None or df.dtypes[pG.type_col_name] == "category"

    return _type_col_ok(pG._vertex_prop_dataframe) and _type_col_ok(
        pG._edge_prop_dataframe
    )
# =============================================================================
# Test data
# =============================================================================
# Synthetic property-graph dataset: two vertex tables ("merchants", "users")
# and three edge tables ("transactions", "relationships", "referrals").
# Each entry maps a type name to [column-name list, list of row tuples].
dataset1 = {
    # Vertex type; "merchant_id" is the vertex ID column.
    "merchants": [
        [
            "merchant_id",
            "merchant_location",
            "merchant_size",
            "merchant_sales",
            "merchant_num_employees",
            "merchant_name",
        ],
        [
            (11, 78750, 44, 123.2, 12, "north"),
            (4, 78757, 112, 234.99, 18, "south"),
            (21, 44145, 83, 992.1, 27, "east"),
            (16, 47906, 92, 32.43, 5, "west"),
            (86, 47906, 192, 2.43, 51, "west"),
        ],
    ],
    # Vertex type; "user_id" is the vertex ID column.
    "users": [
        ["user_id", "user_location", "vertical"],
        [
            (89021, 78757, 0),
            (32431, 78750, 1),
            (89216, 78757, 1),
            (78634, 47906, 0),
        ],
    ],
    # Edge type user -> merchant; includes one None "volume" value so tests
    # exercise null handling.
    "transactions": [
        ["user_id", "merchant_id", "volume", "time", "card_num", "card_type"],
        [
            (89021, 11, 33.2, 1639084966.5513437, 123456, "MC"),
            (89216, 4, None, 1639085163.481217, 8832, "CASH"),
            (78634, 16, 72.0, 1639084912.567394, 4321, "DEBIT"),
            (32431, 4, 103.2, 1639084721.354346, 98124, "V"),
        ],
    ],
    # Edge type user -> user.
    "relationships": [
        ["user_id_1", "user_id_2", "relationship_type"],
        [
            (89216, 89021, 9),
            (89216, 32431, 9),
            (32431, 78634, 8),
            (78634, 89216, 8),
        ],
    ],
    # Edge type user -> user; the referred merchant ID is an edge property.
    "referrals": [
        ["user_id_1", "user_id_2", "merchant_id", "stars"],
        [
            (89216, 78634, 11, 5),
            (89021, 89216, 4, 4),
            (89021, 89216, 21, 3),
            (89021, 89216, 11, 3),
            (89021, 78634, 21, 4),
            (78634, 32431, 11, 4),
        ],
    ],
}
# Minimal edge-only dataset (one edge table, four edges, one string property)
# used by the "simple" fixtures below.
dataset2 = {
    "simple": [
        ["src", "dst", "some_property"],
        [
            (99, 22, "a"),
            (98, 34, "b"),
            (97, 56, "c"),
            (96, 88, "d"),
        ],
    ],
}
# Placeholder for a directed Graph instance. This is not constructed here in
# order to prevent cuGraph code from running on import, which would prevent
# proper pytest collection if an exception is raised. See setup_function().
DiGraph_inst = None
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Per-test setup: force a GC pass and (re)build the directed-Graph global."""
    global DiGraph_inst
    gc.collect()
    # Set the global DiGraph_inst. This is used for calls that require a Graph
    # type or instance to be provided for tests that use a directed graph.
    DiGraph_inst = cugraph.Graph(directed=True)
# =============================================================================
# Pytest fixtures
# =============================================================================
# DataFrame types the PropertyGraph fixtures are parameterized over; only
# cudf.DataFrame is listed here.
df_types = [cudf.DataFrame]
def df_type_id(dataframe_type):
    """
    Return a string that describes the dataframe_type, used for test output.
    """
    prefix = "df_type="
    # Known type -> human-readable label pairs, checked in the same order as
    # the original if-chain; anything unrecognized falls through to "?".
    known_types = (
        (cudf.DataFrame, "cudf.DataFrame"),
        (pd.DataFrame, "pandas.DataFrame"),
        (dask_cudf.core.DataFrame, "dask_cudf.core.DataFrame"),
    )
    for candidate, label in known_types:
        if dataframe_type == candidate:
            return prefix + label
    return prefix + "?"
# Build pytest fixture params (value + readable ID string) from the DataFrame
# types above.
df_types_fixture_params = gen_fixture_params_product((df_types, df_type_id))
@pytest.fixture(scope="module", params=df_types_fixture_params)
def net_PropertyGraph(request):
    """
    Fixture which returns an instance of a (single-GPU) PropertyGraph with
    edge data added from the netscience.csv dataset, parameterized for
    different DataFrame types.  No vertex data is added.
    """
    from cugraph.experimental import PropertyGraph

    dataframe_type = request.param[0]
    netscience_csv = netscience.get_path()
    source_col_name = "src"
    dest_col_name = "dst"
    # Choose the reader matching the requested DataFrame type (CPU vs GPU).
    if dataframe_type is pd.DataFrame:
        read_csv = pd.read_csv
    else:
        read_csv = cudf.read_csv
    df = read_csv(
        netscience_csv,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    pG = PropertyGraph()
    pG.add_edge_data(df, (source_col_name, dest_col_name))
    return pG
@pytest.fixture(scope="module", params=df_types_fixture_params)
def dataset1_PropertyGraph(request):
    """
    Fixture which returns an instance of a (single-GPU) PropertyGraph with
    vertex and edge data added from dataset1, parameterized for different
    DataFrame types.  Returns (PropertyGraph, dataset1) so tests can compare
    against the raw data.
    """
    dataframe_type = request.param[0]
    from cugraph.experimental import PropertyGraph

    (merchants, users, transactions, relationships, referrals) = dataset1.values()

    pG = PropertyGraph()

    # Vertex and edge data is added as one or more DataFrames; either a Pandas
    # DataFrame to keep data on the CPU, a cuDF DataFrame to keep data on GPU,
    # or a dask_cudf DataFrame to keep data on distributed GPUs.

    # For dataset1: vertices are merchants and users, edges are transactions,
    # relationships, and referrals.

    # property_columns=None (the default) means all columns except
    # vertex_col_name will be used as properties for the vertices/edges.

    pG.add_vertex_data(
        dataframe_type(columns=merchants[0], data=merchants[1]),
        type_name="merchants",
        vertex_col_name="merchant_id",
        property_columns=None,
    )
    pG.add_vertex_data(
        dataframe_type(columns=users[0], data=users[1]),
        type_name="users",
        vertex_col_name="user_id",
        property_columns=None,
    )
    pG.add_edge_data(
        dataframe_type(columns=transactions[0], data=transactions[1]),
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    pG.add_edge_data(
        dataframe_type(columns=relationships[0], data=relationships[1]),
        type_name="relationships",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    pG.add_edge_data(
        dataframe_type(columns=referrals[0], data=referrals[1]),
        type_name="referrals",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )
    # All type columns must be stored as the "category" dtype.
    assert type_is_categorical(pG)
    return (pG, dataset1)
@pytest.fixture(scope="function")
def dataset1_MGPropertyGraph(dask_client):
    """
    Fixture which returns an instance of an MGPropertyGraph with vertex and
    edge data added from dataset1.  Returns (MGPropertyGraph, dataset1) so
    tests can compare against the raw data.
    """
    from cugraph.experimental import MGPropertyGraph

    (merchants, users, transactions, relationships, referrals) = dataset1.values()
    mpG = MGPropertyGraph()

    def _distribute(table):
        # Build a cuDF DataFrame from [columns, rows] and split it across
        # two dask partitions to exercise the multi-GPU code paths.
        return dask_cudf.from_cudf(
            cudf.DataFrame(columns=table[0], data=table[1]), npartitions=2
        )

    # property_columns=None (the default) means all columns except the vertex
    # ID column(s) become properties for the vertices/edges.

    # Vertex tables: (raw table, type name, vertex ID column).
    for table, type_name, id_col in (
        (merchants, "merchants", "merchant_id"),
        (users, "users", "user_id"),
    ):
        mpG.add_vertex_data(
            _distribute(table),
            type_name=type_name,
            vertex_col_name=id_col,
            property_columns=None,
        )

    # Edge tables: (raw table, type name, (src column, dst column)).
    for table, type_name, col_names in (
        (transactions, "transactions", ("user_id", "merchant_id")),
        (relationships, "relationships", ("user_id_1", "user_id_2")),
        (referrals, "referrals", ("user_id_1", "user_id_2")),
    ):
        mpG.add_edge_data(
            _distribute(table),
            type_name=type_name,
            vertex_col_names=col_names,
            property_columns=None,
        )

    assert type_is_categorical(mpG)
    return (mpG, dataset1)
@pytest.fixture(scope="module")
def dataset2_simple_MGPropertyGraph(dask_client):
    """
    Module-scoped MGPropertyGraph built from the dataset2 "simple" edge
    table only (no vertex data), split across two partitions.  Returns
    (MGPropertyGraph, table) so tests can compare against the raw rows.
    """
    from cugraph.experimental import MGPropertyGraph

    simple = dataset2["simple"]
    columns, rows = simple

    graph = MGPropertyGraph()
    edge_ddf = dask_cudf.from_cudf(
        cudf.DataFrame(columns=columns, data=rows), npartitions=2
    )
    graph.add_edge_data(edge_ddf, vertex_col_names=("src", "dst"))

    assert type_is_categorical(graph)
    return (graph, simple)
@pytest.fixture(scope="module")
def dataset2_MGPropertyGraph(dask_client):
    """
    Module-scoped MGPropertyGraph built from the dataset2 "simple" edge
    table only (no vertex data), split across two partitions.
    """
    # NOTE(review): this fixture body is identical to
    # dataset2_simple_MGPropertyGraph above — presumably kept separate so
    # tests get distinct module-scoped cached instances; confirm, or
    # consolidate the two.
    from cugraph.experimental import MGPropertyGraph

    dataframe_type = cudf.DataFrame
    simple = dataset2["simple"]
    mpG = MGPropertyGraph()
    sg_df = dataframe_type(columns=simple[0], data=simple[1])
    mgdf = dask_cudf.from_cudf(sg_df, npartitions=2)
    mpG.add_edge_data(mgdf, vertex_col_names=("src", "dst"))
    assert type_is_categorical(mpG)
    return (mpG, simple)
@pytest.fixture(scope="module", params=df_types_fixture_params)
def net_MGPropertyGraph(dask_client):
    """
    Fixture which returns an instance of an MGPropertyGraph with edge data
    added from the netscience.csv dataset.
    """
    # NOTE(review): the fixture declares params=df_types_fixture_params but
    # never reads `request`, so every param run builds the same graph —
    # confirm whether the parameterization is still intended here.
    from cugraph.experimental import MGPropertyGraph

    input_data_path = str(netscience.get_path())
    print(f"dataset={input_data_path}")
    # Pick a chunksize suited to the file so dask partitions are balanced.
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dpG = MGPropertyGraph()
    dpG.add_edge_data(ddf, ("src", "dst"))
    assert type_is_categorical(dpG)
    return dpG
@pytest.mark.mg
@pytest.mark.skip(reason="Skipping tests because it is a work in progress")
def test_extract_subgraph_no_query(net_MGPropertyGraph, net_PropertyGraph):
    """
    Call extract with no args, should result in the entire property graph.
    Compares the MG result against the single-GPU result.
    """
    dpG = net_MGPropertyGraph
    pG = net_PropertyGraph
    assert pG.get_num_edges() == dpG.get_num_edges()
    assert pG.get_num_vertices() == dpG.get_num_vertices()
    # tests that the edges are the same in the sg and mg property graph
    # (canonicalize row order and index before comparing)
    sg_df = pG.edges.sort_values(by=["_SRC_", "_DST_"]).reset_index(drop=True)
    mg_df = dpG.edges.compute().sort_values(by=["_SRC_", "_DST_"])
    mg_df = mg_df.reset_index(drop=True)
    assert sg_df.equals(mg_df)
    subgraph = pG.extract_subgraph()
    dask_subgraph = dpG.extract_subgraph()
    # Sort each extracted edge list by all of its columns so SG and MG rows
    # line up positionally regardless of partitioning.
    sg_subgraph_df = subgraph.edge_data.sort_values(by=list(subgraph.edge_data.columns))
    sg_subgraph_df = sg_subgraph_df.reset_index(drop=True)
    mg_subgraph_df = dask_subgraph.edge_data.compute()
    mg_subgraph_df = mg_subgraph_df.sort_values(by=list(mg_subgraph_df.columns))
    mg_subgraph_df = mg_subgraph_df.reset_index(drop=True)
    assert sg_subgraph_df[["_SRC_", "_DST_"]].equals(mg_subgraph_df[["_SRC_", "_DST_"]])
    # The type column must remain categorical in both SG and MG results.
    assert sg_subgraph_df.dtypes["_TYPE_"] == "category"
    assert mg_subgraph_df.dtypes["_TYPE_"] == "category"
@pytest.mark.mg
@pytest.mark.skip(reason="Skipping tests because it is a work in progress")
def test_adding_fixture(dataset1_PropertyGraph, dataset1_MGPropertyGraph):
    """Verify the SG and MG dataset1 fixtures extract equivalent subgraphs."""
    (sgpG, _) = dataset1_PropertyGraph
    (mgPG, _) = dataset1_MGPropertyGraph

    def _normalized_edge_data(graph, is_mg):
        # Bring MG frames to a single frame, then canonicalize row order and
        # index so SG and MG rows line up positionally.
        df = graph.edge_data.compute() if is_mg else graph.edge_data
        return df.sort_values(by=list(df.columns)).reset_index(drop=True)

    sg_subgraph_df = _normalized_edge_data(sgpG.extract_subgraph(), False)
    mg_subgraph_df = _normalized_edge_data(mgPG.extract_subgraph(), True)

    assert sg_subgraph_df[["_SRC_", "_DST_"]].equals(mg_subgraph_df[["_SRC_", "_DST_"]])
    assert sg_subgraph_df.dtypes["_TYPE_"] == "category"
    assert mg_subgraph_df.dtypes["_TYPE_"] == "category"
@pytest.mark.mg
@pytest.mark.skip(reason="Skipping tests because it is a work in progress")
def test_frame_data(dataset1_PropertyGraph, dataset1_MGPropertyGraph):
    """
    Compare the internal vertex/edge property DataFrames of the SG and MG
    PropertyGraphs built from dataset1.
    """
    (sgpG, _) = dataset1_PropertyGraph
    (mgpG, _) = dataset1_MGPropertyGraph

    # Sort keys used to canonicalize row order before positional comparison.
    edge_sort_col = ["_SRC_", "_DST_", "_TYPE_"]
    vert_sort_col = ["_VERTEX_", "_TYPE_"]

    # vertex_prop_dataframe
    sg_vp_df = sgpG._vertex_prop_dataframe.sort_values(by=vert_sort_col).reset_index(
        drop=True
    )
    mg_vp_df = (
        mgpG._vertex_prop_dataframe.compute()
        .sort_values(by=vert_sort_col)
        .reset_index(drop=True)
    )
    assert sg_vp_df["_VERTEX_"].equals(mg_vp_df["_VERTEX_"])

    # get_edge_prop_dataframe
    sg_ep_df = sgpG._edge_prop_dataframe.sort_values(by=edge_sort_col).reset_index(
        drop=True
    )
    mg_ep_df = (
        mgpG._edge_prop_dataframe.compute()
        .sort_values(by=edge_sort_col)
        .reset_index(drop=True)
    )
    assert sg_ep_df["_SRC_"].equals(mg_ep_df["_SRC_"])
    # The type column must remain categorical in both graphs.
    assert sg_ep_df.dtypes["_TYPE_"] == "category"
    assert mg_ep_df.dtypes["_TYPE_"] == "category"
@pytest.mark.mg
@pytest.mark.parametrize("set_index", [True, False])
def test_add_edge_data_with_ids(dask_client, set_index):
    """
    add_edge_data() on "transactions" table, all properties.

    Adds edge data with user-provided edge IDs (either as a column or as the
    DataFrame index, per the set_index param) and verifies the IDs round-trip
    through get_edge_data().  Also verifies that mixing user-provided and
    auto-generated edge IDs in either order raises NotImplementedError.
    """
    from cugraph.experimental import MGPropertyGraph

    transactions = dataset1["transactions"]
    transactions_df = cudf.DataFrame(columns=transactions[0], data=transactions[1])
    # User-provided edge IDs 10..13 (distinct from the auto-generated range).
    transactions_df["edge_id"] = list(range(10, 10 + len(transactions_df)))
    transactions_ids = transactions_df["edge_id"]
    if set_index:
        transactions_df.set_index("edge_id", inplace=True)
    transactions_df = dask_cudf.from_cudf(transactions_df, npartitions=2)

    pG = MGPropertyGraph()
    pG.add_edge_data(
        transactions_df,
        type_name="transactions",
        edge_id_col_name="edge_id",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )

    assert pG.get_num_vertices() == 7
    # 'transactions' is edge type, not vertex type
    assert pG.get_num_vertices("transactions") == 0
    assert pG.get_num_edges() == 4
    assert pG.get_num_edges("transactions") == 4
    # Original SRC and DST columns no longer include "merchant_id", "user_id"
    expected_props = ["volume", "time", "card_num", "card_type"]
    assert sorted(pG.edge_property_names) == sorted(expected_props)

    relationships = dataset1["relationships"]
    relationships_df = cudf.DataFrame(columns=relationships[0], data=relationships[1])

    # user-provided, then auto-gen (not allowed)
    with pytest.raises(NotImplementedError):
        pG.add_edge_data(
            dask_cudf.from_cudf(relationships_df, npartitions=2),
            type_name="relationships",
            vertex_col_names=("user_id_1", "user_id_2"),
            property_columns=None,
        )

    # User-provided edge IDs 30..33 for the second edge type.
    relationships_df["edge_id"] = list(range(30, 30 + len(relationships_df)))
    relationships_ids = relationships_df["edge_id"]
    if set_index:
        relationships_df.set_index("edge_id", inplace=True)
    relationships_df = dask_cudf.from_cudf(relationships_df, npartitions=2)

    pG.add_edge_data(
        relationships_df,
        type_name="relationships",
        edge_id_col_name="edge_id",
        vertex_col_names=("user_id_1", "user_id_2"),
        property_columns=None,
    )

    # The user-provided IDs must come back unchanged for each edge type.
    df = pG.get_edge_data(types="transactions").compute()
    assert_series_equal(
        df[pG.edge_id_col_name].sort_values().reset_index(drop=True),
        transactions_ids,
        check_names=False,
    )
    df = pG.get_edge_data(types="relationships").compute()
    assert_series_equal(
        df[pG.edge_id_col_name].sort_values().reset_index(drop=True),
        relationships_ids,
        check_names=False,
    )

    # auto-gen, then user-provided (not allowed)
    pG = MGPropertyGraph()
    pG.add_edge_data(
        transactions_df,
        type_name="transactions",
        vertex_col_names=("user_id", "merchant_id"),
        property_columns=None,
    )
    with pytest.raises(NotImplementedError):
        pG.add_edge_data(
            relationships_df,
            type_name="relationships",
            edge_id_col_name="edge_id",
            vertex_col_names=("user_id_1", "user_id_2"),
            property_columns=None,
        )
@pytest.mark.mg
def test_property_names_attrs(dataset1_MGPropertyGraph):
    """
    Ensure the correct number of user-visible properties for vertices and edges
    are returned. This should exclude the internal bookkeeping properties.
    """
    (pG, data) = dataset1_MGPropertyGraph

    # _VERTEX_ columns: "merchant_id", "user_id"
    expected_vert_prop_names = [
        "merchant_location",
        "merchant_size",
        "merchant_sales",
        "merchant_num_employees",
        "user_location",
        "merchant_name",
        "vertical",
    ]
    # _SRC_ and _DST_ columns: "user_id", "user_id_1", "user_id_2"
    # Note that "merchant_id" is a property in for type "transactions"
    expected_edge_prop_names = [
        "merchant_id",
        "volume",
        "time",
        "card_num",
        "card_type",
        "relationship_type",
        "stars",
    ]

    # Extracting a subgraph with weights has/had a side-effect of adding a
    # weight column, so call extract_subgraph() to ensure the internal weight
    # column name is not present.
    pG.extract_subgraph(default_edge_weight=1.0)

    actual_vert_prop_names = pG.vertex_property_names
    actual_edge_prop_names = pG.edge_property_names

    # Compare as sorted lists so ordering differences don't matter.
    assert sorted(actual_vert_prop_names) == sorted(expected_vert_prop_names)
    assert sorted(actual_edge_prop_names) == sorted(expected_edge_prop_names)
@pytest.mark.mg
@pytest.mark.parametrize("as_pg_first", [False, True])
def test_extract_subgraph_nonrenumbered_noedgedata(
    dataset2_simple_MGPropertyGraph, as_pg_first
):
    """
    Ensure a subgraph can be extracted that contains no edge_data.
    """
    from cugraph import Graph

    (pG, data) = dataset2_simple_MGPropertyGraph
    if as_pg_first:
        # Extract a PropertyGraph first, then a plain Graph from it.
        G = pG.extract_subgraph(create_using=pG).extract_subgraph(
            create_using=Graph(directed=True), add_edge_data=False
        )
    else:
        # Extract a plain Graph directly.
        G = pG.extract_subgraph(create_using=Graph(directed=True), add_edge_data=False)

    actual_edgelist = G.edgelist.edgelist_df.compute()

    src_col_name = pG.src_col_name
    dst_col_name = pG.dst_col_name

    # create a DF without the properties (ie. the last column)
    expected_edgelist = cudf.DataFrame(
        columns=[src_col_name, dst_col_name], data=[(i, j) for (i, j, k) in data[1]]
    )

    assert_frame_equal(
        expected_edgelist.sort_values(by=src_col_name, ignore_index=True),
        actual_edgelist.sort_values(by=src_col_name, ignore_index=True),
    )
    # add_edge_data=False must leave the Graph without an edge_data attr.
    assert hasattr(G, "edge_data") is False
@pytest.mark.mg
def test_num_vertices_with_properties(dataset2_simple_MGPropertyGraph):
    """
    Checks that the num_vertices_with_properties attr is set to the number of
    vertices that have properties, as opposed to just num_vertices which also
    includes all verts in the graph edgelist.
    """
    (pG, data) = dataset2_simple_MGPropertyGraph

    # assume no repeated vertices
    assert pG.get_num_vertices() == len(data[1]) * 2
    # No vertex data has been added yet, only edges.
    assert pG.get_num_vertices(include_edge_data=False) == 0

    df = cudf.DataFrame(
        {
            "vertex": [98, 97],
            "some_property": ["a", "b"],
        }
    )
    mgdf = dask_cudf.from_cudf(df, npartitions=2)
    pG.add_vertex_data(mgdf, vertex_col_name="vertex")

    # assume no repeated vertices
    assert pG.get_num_vertices() == len(data[1]) * 2
    # Exactly the two vertices added above now carry properties.
    assert pG.get_num_vertices(include_edge_data=False) == 2
    assert type_is_categorical(pG)
@pytest.mark.mg
def test_edges_attr(dataset2_simple_MGPropertyGraph):
    """
    Ensure the edges attr returns the src, dst, edge_id columns properly.
    """
    (pG, data) = dataset2_simple_MGPropertyGraph

    # create a DF without the properties (ie. the last column)
    expected_edges = cudf.DataFrame(
        columns=[pG.src_col_name, pG.dst_col_name],
        data=[(i, j) for (i, j, k) in data[1]],
    )
    actual_edges = pG.edges[[pG.src_col_name, pG.dst_col_name]].compute()

    assert_frame_equal(
        expected_edges.sort_values(by=pG.src_col_name, ignore_index=True),
        actual_edges.sort_values(by=pG.src_col_name, ignore_index=True),
    )
    # Edge IDs must be present, one per edge, with no duplicates.
    edge_ids = pG.edges[pG.edge_id_col_name].compute()
    expected_num_edges = len(data[1])
    assert len(edge_ids) == expected_num_edges
    assert edge_ids.nunique() == expected_num_edges
@pytest.mark.mg
def test_get_vertex_data(dataset1_MGPropertyGraph):
    """
    Ensure PG.get_vertex_data() returns the correct data based on vertex IDs
    passed in.
    """
    (pG, data) = dataset1_MGPropertyGraph

    # Ensure the generated vertex IDs are unique
    all_vertex_data = pG.get_vertex_data()
    assert all_vertex_data[pG.vertex_col_name].nunique().compute() == len(
        all_vertex_data
    )

    # Test with specific columns and types
    vert_type = "merchants"
    columns = ["merchant_location", "merchant_size"]

    some_vertex_data = pG.get_vertex_data(types=[vert_type], columns=columns)
    # Ensure the returned df is the right length and includes only the
    # vert/type + specified columns
    standard_vert_columns = [pG.vertex_col_name, pG.type_col_name]
    assert len(some_vertex_data) == len(data[vert_type][1])
    assert sorted(some_vertex_data.columns) == sorted(columns + standard_vert_columns)
    assert some_vertex_data.dtypes["_TYPE_"] == "category"

    # Test with all params specified
    vert_ids = [11, 4, 21]
    vert_type = "merchants"
    columns = ["merchant_location", "merchant_size"]

    some_vertex_data = pG.get_vertex_data(
        vertex_ids=vert_ids, types=[vert_type], columns=columns
    )
    # Ensure the returned df is the right length and includes at least the
    # specified columns.
    assert len(some_vertex_data) == len(vert_ids)
    assert set(columns) - set(some_vertex_data.columns) == set()
    assert some_vertex_data.dtypes["_TYPE_"] == "category"

    # Allow a single vertex type and single vertex id to be passed in
    # (scalar args must behave the same as single-element lists)
    df1 = pG.get_vertex_data(vertex_ids=[11], types=[vert_type]).compute()
    df2 = pG.get_vertex_data(vertex_ids=11, types=vert_type).compute()
    assert len(df1) == 1
    assert df1.shape == df2.shape
    assert_frame_equal(df1, df2, check_like=True)
@pytest.mark.mg
def test_get_vertex_data_repeated(dask_client):
    """Repeated IDs in ``vertex_ids`` must be returned repeated, in order."""
    from cugraph.experimental import MGPropertyGraph

    pG = MGPropertyGraph()
    pG.add_vertex_data(
        dask_cudf.from_cudf(
            cudf.DataFrame({"vertex": [2, 3, 4, 1], "feat": [0, 1, 2, 3]}),
            npartitions=2,
        ),
        "vertex",
    )

    actual = pG.get_vertex_data(vertex_ids=[2, 1, 3, 1], columns=["feat"]).compute()
    actual[pG.type_col_name] = actual[pG.type_col_name].astype(str)  # Undo category

    # Row order must follow the requested vertex_ids, duplicates included.
    expected = cudf.DataFrame(
        {
            pG.vertex_col_name: [2, 1, 3, 1],
            pG.type_col_name: ["", "", "", ""],
            "feat": [0, 3, 1, 3],
        }
    )
    assert_frame_equal(actual, expected)
@pytest.mark.mg
def test_get_edge_data(dataset1_MGPropertyGraph):
    """
    Ensure PG.get_edge_data() returns the correct data based on edge IDs passed
    in.
    """
    (pG, data) = dataset1_MGPropertyGraph

    # Ensure the generated edge IDs are unique
    all_edge_data = pG.get_edge_data()
    assert all_edge_data[pG.edge_id_col_name].nunique().compute() == len(all_edge_data)

    # Test with specific edge IDs
    edge_ids = [4, 5, 6]
    some_edge_data = pG.get_edge_data(edge_ids)
    actual_edge_ids = some_edge_data[pG.edge_id_col_name].compute()
    # cuDF Series expose host values via values_host; fall through otherwise.
    if hasattr(actual_edge_ids, "values_host"):
        actual_edge_ids = actual_edge_ids.values_host
    assert sorted(actual_edge_ids) == sorted(edge_ids)
    assert some_edge_data.dtypes["_TYPE_"] == "category"

    # Create a list of expected column names from the three input tables
    expected_columns = set(
        [pG.src_col_name, pG.dst_col_name, pG.edge_id_col_name, pG.type_col_name]
    )
    for d in ["transactions", "relationships", "referrals"]:
        for name in data[d][0]:
            expected_columns.add(name)
    # Vertex ID columns become _SRC_/_DST_ and are not edge properties.
    expected_columns -= {"user_id", "user_id_1", "user_id_2"}

    actual_columns = set(some_edge_data.columns)
    assert actual_columns == expected_columns

    # Test with specific columns and types
    edge_type = "transactions"
    columns = ["card_num", "card_type"]

    some_edge_data = pG.get_edge_data(types=[edge_type], columns=columns)
    # Ensure the returned df is the right length and includes only the
    # src/dst/id/type + specified columns
    standard_edge_columns = [
        pG.src_col_name,
        pG.dst_col_name,
        pG.edge_id_col_name,
        pG.type_col_name,
    ]
    assert len(some_edge_data) == len(data[edge_type][1])
    assert sorted(some_edge_data.columns) == sorted(columns + standard_edge_columns)
    assert some_edge_data.dtypes["_TYPE_"] == "category"

    # Test with all params specified
    # FIXME: since edge IDs are generated, assume that these are correct based
    # on the intended edges being the first three added.
    edge_ids = [0, 1, 2]
    edge_type = "transactions"
    columns = ["card_num", "card_type"]

    some_edge_data = pG.get_edge_data(
        edge_ids=edge_ids, types=[edge_type], columns=columns
    )
    # Ensure the returned df is the right length and includes at least the
    # specified columns.
    assert len(some_edge_data) == len(edge_ids)
    assert set(columns) - set(some_edge_data.columns) == set()
    assert some_edge_data.dtypes["_TYPE_"] == "category"

    # Allow a single edge type and single edge id to be passed in
    # (scalar args must behave the same as single-element lists)
    df1 = pG.get_edge_data(edge_ids=[1], types=[edge_type]).compute()
    df2 = pG.get_edge_data(edge_ids=1, types=edge_type).compute()
    assert len(df1) == 1
    assert df1.shape == df2.shape
    assert_frame_equal(df1, df2, check_like=True)
@pytest.mark.mg
def test_get_edge_data_repeated(dask_client):
    """Repeated IDs in ``edge_ids`` must be returned repeated."""
    from cugraph.experimental import MGPropertyGraph

    df = cudf.DataFrame(
        {"src": [1, 1, 1, 2], "dst": [2, 3, 4, 1], "edge_feat": [0, 1, 2, 3]}
    )
    df = dask_cudf.from_cudf(df, npartitions=2)
    pG = MGPropertyGraph()
    pG.add_edge_data(df, vertex_col_names=["src", "dst"])
    # Edge ID 1 is requested twice and must appear twice in the result.
    df1 = pG.get_edge_data(edge_ids=[2, 1, 3, 1], columns=["edge_feat"])
    df1 = df1.compute()
    expected = cudf.DataFrame(
        {
            pG.edge_id_col_name: [2, 1, 3, 1],
            pG.src_col_name: [1, 1, 2, 1],
            pG.dst_col_name: [4, 3, 1, 3],
            pG.type_col_name: ["", "", "", ""],
            "edge_feat": [2, 1, 3, 1],
        }
    )
    df1[pG.type_col_name] = df1[pG.type_col_name].astype(str)  # Undo category
    # Order and indices don't matter
    df1 = df1.sort_values(df1.columns).reset_index(drop=True)
    expected = expected.sort_values(df1.columns).reset_index(drop=True)
    assert_frame_equal(df1, expected)
@pytest.mark.mg
def test_get_data_empty_graphs(dask_client):
    """
    Ensures that calls to pG.get_*_data() on an empty pG are handled correctly.
    """
    from cugraph.experimental import MGPropertyGraph

    pG = MGPropertyGraph()
    sample_ids = [0, 1, 2]

    # Both getters must return None on an empty graph, with or without IDs.
    for getter in (pG.get_vertex_data, pG.get_edge_data):
        assert getter() is None
        assert getter(sample_ids) is None
@pytest.mark.mg
@pytest.mark.parametrize("prev_id_column", [None, "prev_id"])
def test_renumber_vertices_by_type(dataset1_MGPropertyGraph, prev_id_column):
    """
    Verify renumber_vertices_by_type() assigns contiguous, per-type vertex ID
    ranges, optionally preserving the old IDs in prev_id_column, and updates
    the IDs referenced by edge data.
    """
    from cugraph.experimental import MGPropertyGraph

    (pG, data) = dataset1_MGPropertyGraph
    # prev_id_column must not collide with an existing property column.
    with pytest.raises(ValueError, match="existing column"):
        pG.renumber_vertices_by_type("merchant_size")

    vertex_property_names = set(pG.vertex_property_names)
    edge_property_names = set(pG.edge_property_names)
    df_id_ranges = pG.renumber_vertices_by_type(prev_id_column)
    # Renumbering must not change the property set (other than adding
    # prev_id_column when requested).
    if prev_id_column is not None:
        vertex_property_names.add(prev_id_column)
    assert vertex_property_names == set(pG.vertex_property_names)
    assert edge_property_names == set(pG.edge_property_names)

    # dataset1 has 5 merchants and 4 users, renumbered type-by-type.
    expected = {
        "merchants": [0, 4],  # stop is inclusive
        "users": [5, 8],
    }
    for key, (start, stop) in expected.items():
        assert df_id_ranges.loc[key, "start"] == start
        assert df_id_ranges.loc[key, "stop"] == stop
        df = pG.get_vertex_data(types=[key]).compute().to_pandas()
        df = df.reset_index(drop=True)
        assert len(df) == stop - start + 1
        assert (df["_VERTEX_"] == pd.Series(range(start, stop + 1))).all()
        if prev_id_column is not None:
            cur = df[prev_id_column].sort_values()
            expected = sorted(x for x, *args in data[key][1])
            assert (cur == pd.Series(expected, index=cur.index)).all()

    # Make sure we renumber vertex IDs in edge data too
    df = pG.get_edge_data().compute()
    assert 0 <= df[pG.src_col_name].min() < df[pG.src_col_name].max() < 9
    assert 0 <= df[pG.dst_col_name].min() < df[pG.dst_col_name].max() < 9

    # An empty graph has nothing to renumber.
    empty_pG = MGPropertyGraph()
    assert empty_pG.renumber_vertices_by_type(prev_id_column) is None

    # Test when vertex IDs only exist in edge data
    df = cudf.DataFrame({"src": [99998], "dst": [99999]})
    df = dask_cudf.from_cudf(df, npartitions=1)
    empty_pG.add_edge_data(df, ["src", "dst"])
    with pytest.raises(NotImplementedError, match="only exist in edge"):
        empty_pG.renumber_vertices_by_type(prev_id_column)
@pytest.mark.mg
@pytest.mark.parametrize("prev_id_column", [None, "prev_id"])
def test_renumber_edges_by_type(dataset1_MGPropertyGraph, prev_id_column):
    """
    renumber_edges_by_type() must assign each edge type a contiguous,
    inclusive edge-id range, optionally keeping old ids in prev_id_column.
    """
    from cugraph.experimental import MGPropertyGraph
    (pG, data) = dataset1_MGPropertyGraph
    # Using an existing property name for prev_id_column is an error.
    with pytest.raises(ValueError, match="existing column"):
        pG.renumber_edges_by_type("time")
    df_id_ranges = pG.renumber_edges_by_type(prev_id_column)
    expected = {
        "referrals": [0, 5],  # stop is inclusive
        "relationships": [6, 9],
        "transactions": [10, 13],
    }
    for key, (start, stop) in expected.items():
        assert df_id_ranges.loc[key, "start"] == start
        assert df_id_ranges.loc[key, "stop"] == stop
        df = pG.get_edge_data(types=[key]).compute().to_pandas()
        df = df.reset_index(drop=True)
        assert len(df) == stop - start + 1
        assert (df[pG.edge_id_col_name] == pd.Series(range(start, stop + 1))).all()
        if prev_id_column is not None:
            assert prev_id_column in df.columns
    # An empty graph has nothing to renumber and returns None.
    empty_pG = MGPropertyGraph()
    assert empty_pG.renumber_edges_by_type(prev_id_column) is None
@pytest.mark.mg
def test_renumber_vertices_edges_dtypes(dask_client):
    """
    Renumbering by type must preserve the int32 dtype of vertex and edge ids
    (i.e. not silently upcast to int64).
    """
    from cugraph.experimental import MGPropertyGraph
    edgelist_df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": cp.array([0, 5, 2, 3, 4, 3], dtype="int32"),
                "dst": cp.array([2, 4, 4, 5, 1, 2], dtype="int32"),
                "eid": cp.array([8, 7, 5, 2, 9, 1], dtype="int32"),
            }
        ),
        npartitions=2,
    )
    vertex_df = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "v": cp.array([0, 1, 2, 3, 4, 5], dtype="int32"),
                "p": [5, 10, 15, 20, 25, 30],
            }
        ),
        npartitions=2,
    )
    pG = MGPropertyGraph()
    pG.add_vertex_data(
        vertex_df, vertex_col_name="v", property_columns=["p"], type_name="vt1"
    )
    pG.add_edge_data(
        edgelist_df,
        vertex_col_names=["src", "dst"],
        edge_id_col_name="eid",
        type_name="et1",
    )
    pG.renumber_vertices_by_type()
    vd = pG.get_vertex_data()
    # Vertex ids live in the index; dtype must remain int32.
    assert vd.index.dtype == cp.int32
    pG.renumber_edges_by_type()
    ed = pG.get_edge_data()
    assert ed[pG.edge_id_col_name].dtype == cp.int32
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="FIXME: MG test fails on single-GPU")
@pytest.mark.parametrize("set_index", [True, False])
def test_add_data_noncontiguous(dask_client, set_index):
    """
    Adding per-type slices of a dataframe (non-contiguous row subsets) must
    keep rows associated with the correct type, for both edge and vertex data,
    whether or not the vertex column is moved into the index first.
    """
    from cugraph.experimental import MGPropertyGraph
    df = cudf.DataFrame(
        {
            "src": [0, 0, 1, 2, 2, 3, 3, 1, 2, 4],
            "dst": [1, 2, 4, 3, 3, 1, 2, 4, 4, 3],
            "edge_type": [
                "pig",
                "dog",
                "cat",
                "pig",
                "cat",
                "pig",
                "dog",
                "pig",
                "cat",
                "dog",
            ],
        }
    )
    counts = df["edge_type"].value_counts()
    df = dask_cudf.from_cudf(df, npartitions=2)
    pG = MGPropertyGraph()
    # Add each type from a boolean-masked (hence non-contiguous) slice.
    for edge_type in ["cat", "dog", "pig"]:
        pG.add_edge_data(
            df[df.edge_type == edge_type],
            vertex_col_names=["src", "dst"],
            type_name=edge_type,
        )
    for edge_type in ["cat", "dog", "pig"]:
        cur_df = pG.get_edge_data(types=edge_type).compute()
        assert len(cur_df) == counts[edge_type]
        assert_series_equal(
            cur_df[pG.type_col_name].astype(str),
            cur_df["edge_type"],
            check_names=False,
        )
    # Build unique vertex ids so the same dataframe can be reused as
    # per-type vertex data.
    df["vertex"] = (
        100 * df["src"]
        + df["dst"]
        + df["edge_type"].map({"pig": 0, "dog": 10, "cat": 20})
    )
    pG = MGPropertyGraph()
    for edge_type in ["cat", "dog", "pig"]:
        cur_df = df[df.edge_type == edge_type]
        if set_index:
            # Exercise the path where the vertex id is the index.
            cur_df = cur_df.set_index("vertex")
        pG.add_vertex_data(cur_df, vertex_col_name="vertex", type_name=edge_type)
    for edge_type in ["cat", "dog", "pig"]:
        cur_df = pG.get_vertex_data(types=edge_type).compute()
        assert len(cur_df) == counts[edge_type]
        assert_series_equal(
            cur_df[pG.type_col_name].astype(str),
            cur_df["edge_type"],
            check_names=False,
        )
@pytest.mark.mg
def test_vertex_vector_property(dask_client):
    """
    Exercise vertex vector properties end to end: validation errors on bad
    inputs, creation via vector_properties / vector_property, conversion back
    to arrays with vertex_vector_property_to_array, and fill-value handling
    for types that lack a given vector.
    """
    from cugraph.experimental import MGPropertyGraph
    (merchants, users, transactions, relationships, referrals) = dataset1.values()
    pG = MGPropertyGraph()
    m_df = cudf.DataFrame(columns=merchants[0], data=merchants[1])
    merchants_df = dask_cudf.from_cudf(m_df, npartitions=2)
    with pytest.raises(ValueError):
        # Column doesn't exist
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": ["merchant_location", "BAD_NAME"]},
        )
    with pytest.raises(ValueError):
        # Using reserved name
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={
                pG.type_col_name: ["merchant_location", "merchant_size"]
            },
        )
    with pytest.raises(TypeError):
        # String value invalid
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": "merchant_location"},
        )
    with pytest.raises(ValueError):
        # Length-0 vector not allowed
        pG.add_vertex_data(
            merchants_df,
            type_name="merchants",
            vertex_col_name="merchant_id",
            vector_properties={"vec1": []},
        )
    # Valid: three scalar columns collapse into one vector column "vec1".
    pG.add_vertex_data(
        merchants_df,
        type_name="merchants",
        vertex_col_name="merchant_id",
        vector_properties={
            "vec1": ["merchant_location", "merchant_size", "merchant_num_employees"]
        },
    )
    df = pG.get_vertex_data()
    expected_columns = {
        pG.vertex_col_name,
        pG.type_col_name,
        "merchant_sales",
        "merchant_name",
        "vec1",
    }
    assert set(df.columns) == expected_columns
    expected = m_df[
        ["merchant_location", "merchant_size", "merchant_num_employees"]
    ].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    vec1 = pG.vertex_vector_property_to_array(df, "vec1").compute()
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    # missing="error" is fine while only one type has data.
    vec1 = pG.vertex_vector_property_to_array(df, "vec1", missing="error").compute()
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    with pytest.raises(ValueError):
        pG.vertex_vector_property_to_array(df, "BAD_NAME")
    u_df = cudf.DataFrame(columns=users[0], data=users[1])
    users_df = dask_cudf.from_cudf(u_df, npartitions=2)
    with pytest.raises(ValueError):
        # Length doesn't match existing vector
        pG.add_vertex_data(
            users_df,
            type_name="users",
            vertex_col_name="user_id",
            property_columns=["vertical"],
            vector_properties={"vec1": ["user_location", "vertical"]},
        )
    with pytest.raises(ValueError):
        # Can't assign property to existing vector column
        pG.add_vertex_data(
            users_df.assign(vec1=users_df["user_id"]),
            type_name="users",
            vertex_col_name="user_id",
            property_columns=["vec1"],
        )
    pG.add_vertex_data(
        users_df,
        type_name="users",
        vertex_col_name="user_id",
        property_columns=["vertical"],
        vector_properties={"vec2": ["user_location", "vertical"]},
    )
    expected_columns.update({"vec2", "vertical"})
    df = pG.get_vertex_data()
    assert set(df.columns) == expected_columns
    vec1 = pG.vertex_vector_property_to_array(df, "vec1").compute()
    vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec1)
    with pytest.raises(RuntimeError):
        # "users" rows have no vec1, so missing="error" must now raise.
        pG.vertex_vector_property_to_array(df, "vec1", missing="error").compute()
    # vector_property="vec3" packs ALL remaining columns into one vector.
    pGusers = MGPropertyGraph()
    pGusers.add_vertex_data(
        users_df,
        type_name="users",
        vertex_col_name="user_id",
        vector_property="vec3",
    )
    vec2 = pG.vertex_vector_property_to_array(df, "vec2").compute()
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    df2 = pGusers.get_vertex_data()
    assert set(df2.columns) == {pG.vertex_col_name, pG.type_col_name, "vec3"}
    vec3 = pGusers.vertex_vector_property_to_array(df2, "vec3").compute()
    vec3 = vec3[np.lexsort(vec3.T)]  # may be jumbled, so sort
    assert_array_equal(vec2, vec3)
    # A scalar fill value expands rows that lack the vector.
    vec1filled = pG.vertex_vector_property_to_array(
        df, "vec1", 0, missing="error"
    ).compute()
    vec1filled = vec1filled[np.lexsort(vec1filled.T)]  # may be jumbled, so sort
    expectedfilled = np.concatenate([cp.zeros((4, 3), int), expected])
    assert_array_equal(expectedfilled, vec1filled)
    vec1filled = pG.vertex_vector_property_to_array(df, "vec1", [0, 0, 0]).compute()
    vec1filled = vec1filled[np.lexsort(vec1filled.T)]  # may be jumbled, so sort
    assert_array_equal(expectedfilled, vec1filled)
    with pytest.raises(ValueError, match="expected 3"):
        # A list fill value must match the vector length.
        pG.vertex_vector_property_to_array(df, "vec1", [0, 0]).compute()
    vec2 = pG.vertex_vector_property_to_array(df, "vec2").compute()
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    expected = u_df[["user_location", "vertical"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    assert_array_equal(expected, vec2)
    with pytest.raises(TypeError):
        # Column is wrong type to be a vector
        pG.vertex_vector_property_to_array(
            df.rename(columns={"vec1": "vertical", "vertical": "vec1"}), "vec1"
        )
    with pytest.raises(ValueError):
        # Vector column doesn't exist in dataframe
        pG.vertex_vector_property_to_array(df.rename(columns={"vec1": "moved"}), "vec1")
    with pytest.raises(TypeError):
        # Bad type
        pG.vertex_vector_property_to_array(42, "vec1")
@pytest.mark.mg
def test_edge_vector_property(dask_client):
    """
    Exercise edge vector properties: creation via vector_properties and
    vector_property, conversion back with edge_vector_property_to_array, and
    missing="error" behavior once a second type lacks the vector.
    """
    from cugraph.experimental import MGPropertyGraph
    df1 = cudf.DataFrame(
        {
            "src": [0, 1],
            "dst": [1, 2],
            "feat_0": [1, 2],
            "feat_1": [10, 20],
            "feat_2": [10, 20],
        }
    )
    dd1 = dask_cudf.from_cudf(df1, npartitions=2)
    df2 = cudf.DataFrame(
        {
            "src": [2, 3],
            "dst": [1, 2],
            "feat_0": [0.5, 0.2],
            "feat_1": [1.5, 1.2],
        }
    )
    dd2 = dask_cudf.from_cudf(df2, npartitions=2)
    pG = MGPropertyGraph()
    pG.add_edge_data(
        dd1, ("src", "dst"), vector_properties={"vec1": ["feat_0", "feat_1", "feat_2"]}
    )
    df = pG.get_edge_data()
    expected_columns = {
        pG.edge_id_col_name,
        pG.src_col_name,
        pG.dst_col_name,
        pG.type_col_name,
        "vec1",
    }
    assert set(df.columns) == expected_columns
    expected = df1[["feat_0", "feat_1", "feat_2"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    # Alternate construction: vector_property packs all remaining columns.
    pGalt = MGPropertyGraph()
    pGalt.add_edge_data(dd1, ("src", "dst"), vector_property="vec1")
    # Fix: read dfalt from pGalt (it is paired with pGalt in the loop below);
    # previously this read from pG, so pGalt's own data was never verified.
    dfalt = pGalt.get_edge_data()
    for cur_pG, cur_df in [(pG, df), (pGalt, dfalt)]:
        vec1 = cur_pG.edge_vector_property_to_array(cur_df, "vec1").compute()
        vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
        assert_array_equal(vec1, expected)
        vec1 = cur_pG.edge_vector_property_to_array(
            cur_df, "vec1", missing="error"
        ).compute()
        vec1 = vec1[np.lexsort(vec1.T)]  # may be jumbled, so sort
        assert_array_equal(vec1, expected)
    pG.add_edge_data(
        dd2, ("src", "dst"), vector_properties={"vec2": ["feat_0", "feat_1"]}
    )
    df = pG.get_edge_data()
    expected_columns.add("vec2")
    assert set(df.columns) == expected_columns
    expected = df2[["feat_0", "feat_1"]].values
    expected = expected[np.lexsort(expected.T)]  # may be jumbled, so sort
    vec2 = pG.edge_vector_property_to_array(df, "vec2").compute()
    vec2 = vec2[np.lexsort(vec2.T)]  # may be jumbled, so sort
    assert_array_equal(vec2, expected)
    with pytest.raises(RuntimeError):
        # The first (default-type) edges have no vec2, so this must raise.
        pG.edge_vector_property_to_array(df, "vec2", missing="error").compute()
@pytest.mark.mg
def test_fillna_vertices(dask_client):
    """
    fillna_vertices() with a per-column mapping must fill NA vertex property
    values only; edge data NAs must be left untouched.

    Fix: the dask_client fixture parameter was missing, unlike every other MG
    test in this module, so the test ran without the MG cluster fixture.
    """
    from cugraph.experimental import MGPropertyGraph
    df_edgelist = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 7, 2, 0, 1, 3, 1, 4, 5, 6],
                "dst": [1, 1, 1, 3, 2, 1, 6, 5, 6, 7],
                "val": [1, None, 2, None, 3, None, 4, None, 5, None],
            }
        ),
        npartitions=2,
    )
    df_props = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "id": [0, 1, 2, 3, 4, 5, 6, 7],
                "a": [0, 1, None, 2, None, 4, 1, 8],
                "b": [None, 1, None, 2, None, 3, 8, 9],
            }
        ),
        npartitions=2,
    )
    pG = MGPropertyGraph()
    pG.add_edge_data(df_edgelist, vertex_col_names=["src", "dst"])
    pG.add_vertex_data(df_props, vertex_col_name="id")
    # Fill column "a" with 2 and column "b" with 3.
    pG.fillna_vertices({"a": 2, "b": 3})
    assert not pG.get_vertex_data(columns=["a", "b"]).compute().isna().any().any()
    # Edge data must still contain its NAs.
    assert pG.get_edge_data(columns=["val"]).compute().isna().any().any()
    expected_values_prop_a = [0, 1, 2, 2, 2, 4, 1, 8]
    assert pG.get_vertex_data(columns=["a"])["a"].compute().values_host.tolist() == (
        expected_values_prop_a
    )
    expected_values_prop_b = [3, 1, 3, 2, 3, 3, 8, 9]
    assert pG.get_vertex_data(columns=["b"])["b"].compute().values_host.tolist() == (
        expected_values_prop_b
    )
@pytest.mark.mg
def test_fillna_edges(dask_client):
    """
    fillna_edges() with a scalar must fill NA edge property values only;
    vertex data NAs must be left untouched.

    Fix: the dask_client fixture parameter was missing, unlike every other MG
    test in this module, so the test ran without the MG cluster fixture.
    """
    from cugraph.experimental import MGPropertyGraph
    df_edgelist = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 7, 2, 0, 1, 3, 1, 4, 5, 6],
                "dst": [1, 1, 1, 3, 2, 1, 6, 5, 6, 7],
                "val": [1, None, 2, None, 3, None, 4, None, 5, None],
            }
        ),
        npartitions=2,
    )
    df_props = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "id": [0, 1, 2, 3, 4, 5, 6, 7],
                "a": [0, 1, None, 2, None, 4, 1, 8],
                "b": [None, 1, None, 2, None, 3, 8, 9],
            }
        ),
        npartitions=2,
    )
    pG = MGPropertyGraph()
    pG.add_edge_data(df_edgelist, vertex_col_names=["src", "dst"])
    pG.add_vertex_data(df_props, vertex_col_name="id")
    # Scalar fill applies to every NA in the edge properties.
    pG.fillna_edges(2)
    assert not pG.get_edge_data(columns=["val"]).compute().isna().any().any()
    # Vertex data must still contain its NAs.
    assert pG.get_vertex_data(columns=["a", "b"]).compute().isna().any().any()
    expected_values_prop_val = [1, 2, 2, 2, 3, 2, 4, 2, 5, 2]
    assert pG.get_edge_data(columns=["val"])["val"].compute().values_host.tolist() == (
        expected_values_prop_val
    )
@pytest.mark.mg
def test_types_from_numerals(dask_client):
    """
    vertex_types_from_numerals / edge_types_from_numerals must map numeric
    type codes back to the type names, ordered alphabetically by type name
    (cow=0/pig=1 for edges, duck=0/goose=1 for vertices).
    """
    from cugraph.experimental import MGPropertyGraph
    df_edgelist_cow = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [0, 7, 2, 0, 1],
                "dst": [1, 1, 1, 3, 2],
                "val": [1, 3, 2, 3, 3],
            }
        ),
        npartitions=2,
    )
    df_edgelist_pig = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "src": [3, 1, 4, 5, 6],
                "dst": [1, 6, 5, 6, 7],
                "val": [5, 4, 5, 5, 2],
            }
        ),
        npartitions=2,
    )
    df_props_duck = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "id": [0, 1, 2, 3],
                "a": [0, 1, 6, 2],
                "b": [2, 1, 2, 2],
            }
        ),
        npartitions=2,
    )
    df_props_goose = dask_cudf.from_cudf(
        cudf.DataFrame(
            {
                "id": [4, 5, 6, 7],
                "a": [5, 4, 1, 8],
                "b": [2, 3, 8, 9],
            }
        ),
        npartitions=2,
    )
    pG = MGPropertyGraph()
    pG.add_edge_data(df_edgelist_cow, vertex_col_names=["src", "dst"], type_name="cow")
    pG.add_edge_data(df_edgelist_pig, vertex_col_names=["src", "dst"], type_name="pig")
    pG.add_vertex_data(df_props_duck, vertex_col_name="id", type_name="duck")
    pG.add_vertex_data(df_props_goose, vertex_col_name="id", type_name="goose")
    # 0 -> "duck", 1 -> "goose"
    assert pG.vertex_types_from_numerals(
        cudf.Series([0, 1, 0, 0, 1, 0, 1, 1])
    ).values_host.tolist() == [
        "duck",
        "goose",
        "duck",
        "duck",
        "goose",
        "duck",
        "goose",
        "goose",
    ]
    # 0 -> "cow", 1 -> "pig"
    assert pG.edge_types_from_numerals(
        cudf.Series([1, 1, 0, 1, 1, 0, 0, 1, 1])
    ).values_host.tolist() == [
        "pig",
        "pig",
        "cow",
        "pig",
        "pig",
        "cow",
        "cow",
        "pig",
        "pig",
    ]
@pytest.mark.mg
def test_renumber_by_type_only_default_type(dask_client):
    """
    Renumbering when only the default (empty) type exists must still produce
    contiguous 0..n-1 vertex and edge ids.
    """
    from cugraph.experimental import MGPropertyGraph
    pG = MGPropertyGraph()
    df = cudf.DataFrame(
        {
            "src": cp.array([0, 0, 1, 2, 2, 3], dtype="int32"),
            "dst": cp.array([1, 2, 4, 3, 4, 1], dtype="int32"),
        }
    )
    ddf = dask_cudf.from_cudf(df, npartitions=2)
    pG.add_edge_data(ddf, vertex_col_names=["src", "dst"])
    df2 = cudf.DataFrame(
        {
            "prop1": [100, 200, 300, 400, 500],
            "prop2": [5, 4, 3, 2, 1],
            "id": cp.array([0, 1, 2, 3, 4], dtype="int32"),
        }
    )
    ddf2 = dask_cudf.from_cudf(df2, npartitions=2)
    pG.add_vertex_data(ddf2, vertex_col_name="id")
    pG.renumber_vertices_by_type()
    got = pG.get_vertex_data().compute()
    # Ids must be exactly 0..n-1 in order.
    assert got[pG.vertex_col_name].to_arrow().to_pylist() == list(range(len(got)))
    pG.renumber_edges_by_type()
    got = pG.get_edge_data().compute()
    assert got[pG.edge_id_col_name].to_arrow().to_pylist() == list(range(len(got)))
# =============================================================================
# Benchmarks
# =============================================================================
@pytest.mark.slow
@pytest.mark.parametrize("N", [1, 3, 10, 30])
def bench_add_edges_cyber(gpubenchmark, dask_client, N):
    """
    Benchmark add_edge_data() when the cyber edgelist is added in N chunks.
    """
    from cugraph.experimental import MGPropertyGraph
    # Partition the dataframe to add in chunks
    cyber_df = cyber.get_edgelist()
    chunk = (len(cyber_df) + N - 1) // N  # ceil-divide so all rows are covered
    dfs = [
        dask_cudf.from_cudf(cyber_df.iloc[i * chunk : (i + 1) * chunk], npartitions=2)
        for i in range(N)
    ]
    def func():
        # Build a fresh graph per benchmark iteration.
        mpG = MGPropertyGraph()
        for df in dfs:
            mpG.add_edge_data(df, ("srcip", "dstip"))
        df = mpG.get_edge_data().compute()
        assert len(df) == len(cyber_df)
    gpubenchmark(func)
@pytest.mark.slow
@pytest.mark.parametrize("n_rows", [1_000_000])
@pytest.mark.parametrize("n_feats", [128])
def bench_get_vector_features(gpubenchmark, dask_client, n_rows, n_feats):
    """
    Benchmark get_edge_data() on a graph whose n_feats feature columns were
    packed into a single vector property.
    """
    from cugraph.experimental import MGPropertyGraph
    df = cudf.DataFrame(
        {
            "src": cp.arange(0, n_rows, dtype=cp.int32),
            "dst": cp.arange(0, n_rows, dtype=cp.int32) + 1,
        }
    )
    for i in range(n_feats):
        df[f"feat_{i}"] = cp.ones(len(df), dtype=cp.int32)
    df = dask_cudf.from_cudf(df, npartitions=16)
    vector_properties = {"feat": [f"feat_{i}" for i in range(n_feats)]}
    pG = MGPropertyGraph()
    pG.add_edge_data(
        df, vertex_col_names=["src", "dst"], vector_properties=vector_properties
    )
    def func(pG):
        # Fetch a 100k-edge slice; graph construction is outside the timing.
        df = pG.get_edge_data(edge_ids=cp.arange(0, 100_000))
        df = df.compute()
    gpubenchmark(func, pG)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_overlap.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
import scipy
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cudf.testing import assert_series_equal
from cudf.testing.testing import assert_frame_equal
# Column-name constants shared by the tests in this module.
SRC_COL = "0"  # source-vertex column in the raw csv
DST_COL = "1"  # destination-vertex column in the raw csv
VERTEX_PAIR_FIRST_COL = "first"  # columns of the overlap result / 2-hop pairs
VERTEX_PAIR_SECOND_COL = "second"
OVERLAP_COEFF_COL = "overlap_coeff"
EDGE_ATT_COL = "weight"
# Names used for the multi-column vertex-identifier tests.
MULTI_COL_SRC_0_COL = "src_0"
MULTI_COL_DST_0_COL = "dst_0"
MULTI_COL_SRC_1_COL = "src_1"
MULTI_COL_DST_1_COL = "dst_1"
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Collect garbage before each test to release memory promptly.
    gc.collect()
# =============================================================================
# Helper functions
# =============================================================================
def compare_overlap(cu_coeff, cpu_coeff):
    """Assert element-wise agreement (within 1e-6) of the two coefficient
    sequences; NaNs must appear in the same positions."""
    assert len(cu_coeff) == len(cpu_coeff)
    for cu_val, cpu_val in zip(cu_coeff, cpu_coeff):
        if np.isnan(cpu_val):
            # A NaN reference value requires a NaN result.
            assert np.isnan(cu_val)
        elif np.isnan(cu_val):
            # NaN result without NaN reference: force a failure.
            assert cpu_val == cu_val
        else:
            assert abs(cpu_val - cu_val) < 1.0e-6
def cugraph_call(benchmark_callable, graph_file, pairs, use_weight=False):
    """
    Run cugraph.overlap on graph_file for the given vertex pairs and return
    the overlap coefficients as a numpy array (sorted by vertex pair).
    When use_weight is True, also check that the legacy overlap_w entry point
    agrees with the plain call.
    """
    # Device data
    G = graph_file.get_graph(
        create_using=cugraph.Graph(directed=False), ignore_weights=not use_weight
    )
    # cugraph Overlap Call
    df = benchmark_callable(cugraph.overlap, G, pairs)
    df = df.sort_values(by=[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]).reset_index(
        drop=True
    )
    if use_weight:
        res_w_overlap = cugraph.overlap_w(G, vertex_pair=pairs)
        assert_frame_equal(res_w_overlap, df, check_dtype=False, check_like=True)
    return df[OVERLAP_COEFF_COL].to_numpy()
def intersection(a, b, M):
    """Count the common neighbors of rows *a* and *b* of CSR matrix *M*
    using a sorted-merge walk over the two index ranges."""
    i, i_end = M.indptr[a], M.indptr[a + 1]
    j, j_end = M.indptr[b], M.indptr[b + 1]
    shared = 0
    while i < i_end and j < j_end:
        u = M.indices[i]
        v = M.indices[j]
        if u < v:
            i += 1
        elif v < u:
            j += 1
        else:
            shared += 1
            i += 1
            j += 1
    return shared
def degree(a, M):
    """Return the degree (row nnz) of vertex *a* in CSR matrix *M*."""
    row_start = M.indptr[a]
    row_end = M.indptr[a + 1]
    return row_end - row_start
def overlap(a, b, M):
    """Reference overlap coefficient of vertices *a* and *b* on CSR matrix *M*:
    |N(a) & N(b)| / min(|N(a)|, |N(b)|); NaN when *b* has no neighbors."""
    denom_b = degree(b, M)
    if denom_b == 0:
        return float("NaN")
    denom_a = degree(a, M)
    shared = intersection(a, b, M)
    return shared / min(denom_a, denom_b)
def cpu_call(M, first, second):
    """Compute the reference overlap coefficient for each (first[i], second[i])
    vertex pair on CSR matrix *M* and return the list of coefficients.

    Idiom fix: replaces the manual index-and-append loop with a comprehension
    over the zipped pair sequences (same order, same values).
    """
    return [overlap(f, s, M) for f, s in zip(first, second)]
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture(scope="module", params=UNDIRECTED_DATASETS)
def read_csv(request):
    """
    Read the dataset csv once per module and return (M, graph_file) where M is
    a scipy CSR adjacency matrix used by the CPU reference implementation.
    """
    graph_file = request.param
    dataset_path = graph_file.get_path()
    Mnx = utils.read_csv_for_nx(dataset_path)
    # Vertex ids are 0-based, so the matrix is (max id + 1) square.
    N = max(max(Mnx[SRC_COL]), max(Mnx[DST_COL])) + 1
    M = scipy.sparse.csr_matrix(
        (Mnx.weight, (Mnx[SRC_COL], Mnx[DST_COL])), shape=(N, N)
    )
    return M, graph_file
@pytest.fixture(scope="module")
def extract_two_hop(read_csv):
    """
    Build an unweighted graph and return its two-hop neighbor pairs, sorted
    for deterministic comparison against the CPU reference.
    """
    _, graph_file = read_csv
    G = graph_file.get_graph(ignore_weights=True)
    pairs = (
        G.get_two_hop_neighbors()
        .sort_values([VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL])
        .reset_index(drop=True)
    )
    return pairs
# Test
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_overlap(gpubenchmark, read_csv, extract_two_hop, use_weight):
    """Compare cugraph.overlap against the CPU reference on two-hop pairs."""
    M, graph_file = read_csv
    pairs = extract_two_hop
    cu_coeff = cugraph_call(gpubenchmark, graph_file, pairs, use_weight=use_weight)
    cpu_coeff = cpu_call(M, pairs[VERTEX_PAIR_FIRST_COL], pairs[VERTEX_PAIR_SECOND_COL])
    compare_overlap(cu_coeff, cpu_coeff)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
@pytest.mark.parametrize("use_weight", [False, True])
def test_directed_graph_check(graph_file, use_weight):
    """cugraph.overlap must raise ValueError when given a directed graph."""
    M = utils.read_csv_for_nx(graph_file.get_path())
    cu_M = cudf.DataFrame()
    cu_M[SRC_COL] = cudf.Series(M[SRC_COL])
    cu_M[DST_COL] = cudf.Series(M[DST_COL])
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])
    G1 = cugraph.Graph(directed=True)
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(cu_M, source=SRC_COL, destination=DST_COL, weight=weight)
    vertex_pair = cu_M[[SRC_COL, DST_COL]]
    vertex_pair = vertex_pair[:5]
    with pytest.raises(ValueError):
        # Fix: pass use_weight by keyword (as the other tests in this module
        # do) -- the third positional parameter of cugraph.overlap is not
        # use_weight, so the positional call bound the flag to the wrong
        # parameter and the weighted path was never exercised.
        cugraph.overlap(G1, vertex_pair, use_weight=use_weight)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
@pytest.mark.parametrize("use_weight", [False, True])
def test_overlap_multi_column(graph_file, use_weight):
    """
    Overlap on a graph with multi-column vertex identifiers must match the
    result on the equivalent single-column graph.
    """
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    cu_M = cudf.DataFrame()
    cu_M[MULTI_COL_SRC_0_COL] = cudf.Series(M[SRC_COL])
    cu_M[MULTI_COL_DST_0_COL] = cudf.Series(M[DST_COL])
    # Second id column is an offset copy, so pairs stay structurally identical.
    cu_M[MULTI_COL_SRC_1_COL] = cu_M[MULTI_COL_SRC_0_COL] + 1000
    cu_M[MULTI_COL_DST_1_COL] = cu_M[MULTI_COL_DST_0_COL] + 1000
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])
    G1 = cugraph.Graph()
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(
        cu_M,
        source=[MULTI_COL_SRC_0_COL, MULTI_COL_SRC_1_COL],
        destination=[MULTI_COL_DST_0_COL, MULTI_COL_DST_1_COL],
        weight=weight,
    )
    vertex_pair = cu_M[
        [
            MULTI_COL_SRC_0_COL,
            MULTI_COL_SRC_1_COL,
            MULTI_COL_DST_0_COL,
            MULTI_COL_DST_1_COL,
        ]
    ]
    vertex_pair = vertex_pair[:5]
    df_multi_col_res = cugraph.overlap(G1, vertex_pair, use_weight=use_weight)
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(
        cu_M, source=MULTI_COL_SRC_0_COL, destination=MULTI_COL_DST_0_COL, weight=weight
    )
    # NOTE(review): the single-column call does not pass use_weight, so when
    # use_weight=True this compares a weighted result against an unweighted
    # one -- confirm this is intentional.
    df_single_col_res = cugraph.overlap(
        G2, vertex_pair[[MULTI_COL_SRC_0_COL, MULTI_COL_DST_0_COL]]
    )
    # Calculating mismatch
    actual = df_multi_col_res.sort_values("0_src").reset_index()
    expected = df_single_col_res.sort_values(VERTEX_PAIR_FIRST_COL).reset_index()
    assert_series_equal(actual[OVERLAP_COEFF_COL], expected[OVERLAP_COEFF_COL])
@pytest.mark.sg
def test_weighted_overlap():
    """use_weight=True on a graph built without weights must raise ValueError."""
    karate = UNDIRECTED_DATASETS[0]
    G = karate.get_graph(ignore_weights=True)
    with pytest.raises(ValueError):
        cugraph.overlap(G, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_sorensen_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing import utils
from cugraph.dask.common.mg_utils import is_single_gpu
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Collect garbage before each test to release memory promptly.
    gc.collect()
# Parameter axes for the fixture product below.
IS_DIRECTED = [False]  # sorensen is only defined for undirected graphs here
HAS_VERTEX_PAIR = [True, False]
IS_WEIGHTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]
# Cartesian product of all parameter axes, one fixture param per combination.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (HAS_VERTEX_PAIR, "has_vertex_pair"),
    (IS_WEIGHTED, "is_weighted"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Return the current combination of params as a dictionary keyed by
    parameter name, for use in tests or other parameterized fixtures.
    """
    parameters = dict(
        zip(("graph_file", "directed", "has_vertex_pair", "is_weighted"), request.param)
    )
    return parameters
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the Sorensen
    algo (based on single-GPU cuGraph Sorensen) which can be used for
    validation, along with the MG graph to run the dask version on.
    """
    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    has_vertex_pair = input_combo["has_vertex_pair"]
    is_weighted = input_combo["is_weighted"]
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=directed, edgevals=is_weighted
    )
    if has_vertex_pair:
        # Sample random vertices from the graph and compute the two_hop_neighbors
        # with those seeds
        k = random.randint(1, 10)
        seeds = random.sample(range(G.number_of_vertices()), k)
        vertex_pair = G.get_two_hop_neighbors(start_vertices=seeds)
    else:
        vertex_pair = None
    input_combo["vertex_pair"] = vertex_pair
    sg_cugraph_sorensen = cugraph.sorensen(
        G, input_combo["vertex_pair"], use_weight=is_weighted
    )
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    input_combo["sg_cugraph_results"] = sg_cugraph_sorensen
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value" if is_weighted else None,
        renumber=True,
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
def test_dask_mg_sorensen(dask_client, benchmark, input_expected_output):
    """
    Compare MG (dask) sorensen coefficients against the SG reference results
    computed by the input_expected_output fixture, within 1e-5.
    """
    dg = input_expected_output["MGGraph"]
    use_weight = input_expected_output["is_weighted"]
    result_sorensen = benchmark(
        dcg.sorensen, dg, input_expected_output["vertex_pair"], use_weight=use_weight
    )
    result_sorensen = (
        result_sorensen.compute()
        .sort_values(["first", "second"])
        .reset_index(drop=True)
        .rename(columns={"sorensen_coeff": "mg_cugraph_sorensen_coeff"})
    )
    expected_output = (
        input_expected_output["sg_cugraph_results"]
        .sort_values(["first", "second"])
        .reset_index(drop=True)
    )
    # Update the dask cugraph sorensen results with sg cugraph results for easy
    # comparison using cuDF DataFrame methods.
    result_sorensen["sg_cugraph_sorensen_coeff"] = expected_output["sorensen_coeff"]
    # Two one-sided queries together check |mg - sg| <= 1e-5.
    sorensen_coeff_diffs1 = result_sorensen.query(
        "mg_cugraph_sorensen_coeff - sg_cugraph_sorensen_coeff > 0.00001"
    )
    sorensen_coeff_diffs2 = result_sorensen.query(
        "mg_cugraph_sorensen_coeff - sg_cugraph_sorensen_coeff < -0.00001"
    )
    assert len(sorensen_coeff_diffs1) == 0
    assert len(sorensen_coeff_diffs2) == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_jaccard_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Collect garbage before each test to release memory promptly.
    gc.collect()
# Parameter axes for the fixture product below.
IS_DIRECTED = [False]  # jaccard is only defined for undirected graphs here
HAS_VERTEX_PAIR = [True, False]
IS_WEIGHTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]
# Cartesian product of all parameter axes, one fixture param per combination.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (HAS_VERTEX_PAIR, "has_vertex_pair"),
    (IS_WEIGHTED, "is_weighted"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Return the current combination of params as a dictionary keyed by
    parameter name, for use in tests or other parameterized fixtures.
    """
    parameters = dict(
        zip(("graph_file", "directed", "has_vertex_pair", "is_weighted"), request.param)
    )
    return parameters
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the Jaccard
    algo (based on single-GPU cuGraph Jaccard) which can be used for
    validation, along with the MG graph to run the dask version on.
    """
    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    has_vertex_pair = input_combo["has_vertex_pair"]
    is_weighted = input_combo["is_weighted"]
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=directed, edgevals=is_weighted
    )
    if has_vertex_pair:
        # Sample random vertices from the graph and compute the two_hop_neighbors
        # with those seeds
        k = random.randint(1, 10)
        seeds = random.sample(range(G.number_of_vertices()), k)
        vertex_pair = G.get_two_hop_neighbors(start_vertices=seeds)
    else:
        vertex_pair = None
    input_combo["vertex_pair"] = vertex_pair
    sg_cugraph_jaccard = cugraph.jaccard(
        G, input_combo["vertex_pair"], use_weight=is_weighted
    )
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    input_combo["sg_cugraph_results"] = sg_cugraph_jaccard
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value" if is_weighted else None,
        renumber=True,
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.mg
def test_dask_mg_jaccard(dask_client, benchmark, input_expected_output):
    """Multi-GPU Jaccard must match the single-GPU reference within 1e-5."""
    dg = input_expected_output["MGGraph"]
    use_weight = input_expected_output["is_weighted"]
    result_jaccard = benchmark(
        dcg.jaccard, dg, input_expected_output["vertex_pair"], use_weight=use_weight
    )
    # Materialize the lazy dask result and normalize ordering for comparison.
    result_jaccard = (
        result_jaccard.compute()
        .sort_values(["first", "second"])
        .reset_index(drop=True)
        .rename(columns={"jaccard_coeff": "mg_cugraph_jaccard_coeff"})
    )
    expected_output = (
        input_expected_output["sg_cugraph_results"]
        .sort_values(["first", "second"])
        .reset_index(drop=True)
    )
    # Update the dask cugraph Jaccard results with sg cugraph results for easy
    # comparison using cuDF DataFrame methods.
    result_jaccard["sg_cugraph_jaccard_coeff"] = expected_output["jaccard_coeff"]
    # Two one-sided queries implement an absolute-difference tolerance check.
    jaccard_coeff_diffs1 = result_jaccard.query(
        "mg_cugraph_jaccard_coeff - sg_cugraph_jaccard_coeff > 0.00001"
    )
    jaccard_coeff_diffs2 = result_jaccard.query(
        "mg_cugraph_jaccard_coeff - sg_cugraph_jaccard_coeff < -0.00001"
    )
    assert len(jaccard_coeff_diffs1) == 0
    assert len(jaccard_coeff_diffs2) == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_jaccard.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# FIXME: Can we use global variables for column names instead of hardcoded ones?
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.datasets import netscience
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cudf.testing import assert_series_equal
from cudf.testing.testing import assert_frame_equal
# Column names produced by utils.read_csv_file / read_csv_for_nx.
SRC_COL = "0"
DST_COL = "1"
# Column names in cugraph.jaccard result frames.
VERTEX_PAIR_FIRST_COL = "first"
VERTEX_PAIR_SECOND_COL = "second"
JACCARD_COEFF_COL = "jaccard_coeff"
EDGE_ATT_COL = "weight"
# Synthetic column names for the multi-column vertex-identifier tests.
MULTI_COL_SRC_0_COL = "src_0"
MULTI_COL_DST_0_COL = "dst_0"
MULTI_COL_SRC_1_COL = "src_1"
MULTI_COL_DST_1_COL = "dst_1"
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Run a GC pass before every test so leftover device/host allocations
    from the previous test are reclaimed."""
    gc.collect()
# =============================================================================
# Helper functions
# =============================================================================
def compare_jaccard_two_hop(G, Gnx, use_weight=False):
    """
    Score G's two-hop vertex pairs with cugraph.jaccard and, for the
    unweighted case, check every coefficient against networkx.
    """
    pairs = (
        G.get_two_hop_neighbors()
        .sort_values([VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL])
        .reset_index(drop=True)
    )
    df = (
        cugraph.jaccard(G, pairs)
        .sort_values(by=[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL])
        .reset_index(drop=True)
    )
    if use_weight:
        # FIXME: compare results against resultset api
        return
    nx_pairs = list(pairs.to_records(index=False))
    preds = nx.jaccard_coefficient(Gnx, nx_pairs)
    nx_coeff = [p for _, _, p in preds]
    assert len(nx_coeff) == len(df)
    cu_coeff = df[JACCARD_COEFF_COL]
    for i, expected in enumerate(nx_coeff):
        assert abs(expected - cu_coeff.iloc[i]) < 1.0e-6
def cugraph_call(benchmark_callable, graph_file, input_df=None, use_weight=False):
    """Benchmark cugraph.jaccard on ``graph_file``'s graph.

    Parameters
    ----------
    benchmark_callable : callable
        Benchmark wrapper invoked as ``benchmark_callable(fn, *args, **kw)``.
    graph_file : dataset object
        Provides ``get_graph(ignore_weights=...)``.
    input_df : cudf.DataFrame, optional
        One-hop edge list used as the vertex pairs; when absent an empty pair
        frame is passed instead.
    use_weight : bool
        Load edge weights and run weighted Jaccard.

    Returns
    -------
    tuple of numpy arrays ``(first, second, jaccard_coeff)`` sorted by pair.
    """
    # Fix: removed the dead ``G = cugraph.Graph()`` assignment that was
    # immediately overwritten by the line below.
    G = graph_file.get_graph(ignore_weights=not use_weight)
    # If no vertex_pair is passed as input, 'cugraph.jaccard' will
    # compute the 'jaccard_similarity' with the two_hop_neighbor of the
    # entire graph while nx compute with the one_hop_neighbor. For better
    # comparison, get the one_hop_neighbor of the entire graph for
    # 'cugraph.jaccard' and pass it as vertex_pair
    if isinstance(input_df, cudf.DataFrame):
        vertex_pair = input_df.rename(
            columns={SRC_COL: VERTEX_PAIR_FIRST_COL, DST_COL: VERTEX_PAIR_SECOND_COL}
        )
        vertex_pair = vertex_pair[[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]]
    else:
        # Empty pair frame typed to match the graph's vertex dtype.
        vertex_pair = cudf.DataFrame(
            columns=[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL],
            dtype=G.edgelist.edgelist_df["src"].dtype,
        )
    # cugraph Jaccard Call
    df = benchmark_callable(
        cugraph.jaccard, G, vertex_pair=vertex_pair, use_weight=use_weight
    )
    df = df.sort_values([VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]).reset_index(
        drop=True
    )
    return (
        df[VERTEX_PAIR_FIRST_COL].to_numpy(),
        df[VERTEX_PAIR_SECOND_COL].to_numpy(),
        df[JACCARD_COEFF_COL].to_numpy(),
    )
def networkx_call(M, benchmark_callable=None):
    """Run nx.jaccard_coefficient over every (symmetrized) edge of M.

    Returns three parallel lists: sources, destinations, coefficients.
    """
    srcs = M[SRC_COL]
    dsts = M[DST_COL]
    # Symmetrize the edge list, de-duplicate (dict preserves insertion
    # order), then sort for deterministic output.
    raw_pairs = []
    for s, d in zip(srcs, dsts):
        raw_pairs.append((s, d))
        raw_pairs.append((d, s))
    edges = sorted(dict.fromkeys(raw_pairs))
    # in NVGRAPH tests we read as CSR and feed as CSC, so here we doing this
    # explicitly
    print("Format conversion ... ")
    Gnx = nx.from_pandas_edgelist(
        M,
        source=SRC_COL,
        target=DST_COL,
        edge_attr=EDGE_ATT_COL,
        create_using=nx.Graph(),
    )
    # Networkx Jaccard Call
    print("Solving... ")
    if benchmark_callable is None:
        preds = nx.jaccard_coefficient(Gnx, edges)
    else:
        preds = benchmark_callable(nx.jaccard_coefficient, Gnx, edges)
    src, dst, coeff = [], [], []
    for u, v, p in preds:
        src.append(u)
        dst.append(v)
        coeff.append(p)
    return src, dst, coeff
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture(scope="module", params=UNDIRECTED_DATASETS)
def read_csv(request):
    """Load the current dataset once per module for cugraph and networkx."""
    graph_file = request.param
    path = graph_file.get_path()
    return utils.read_csv_file(path), utils.read_csv_for_nx(path), graph_file
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_jaccard(read_csv, gpubenchmark, use_weight):
    """Unweighted: compare cugraph.jaccard against networkx.
    Weighted: compare against the legacy ``cugraph.jaccard_w`` entry point.
    """
    M_cu, M, graph_file = read_csv
    cu_src, cu_dst, cu_coeff = cugraph_call(
        gpubenchmark, graph_file, input_df=M_cu, use_weight=use_weight
    )
    if not use_weight:
        nx_src, nx_dst, nx_coeff = networkx_call(M)
        # Calculating mismatch
        err = 0
        tol = 1.0e-06
        assert len(cu_coeff) == len(nx_coeff)
        for i in range(len(cu_coeff)):
            if abs(cu_coeff[i] - nx_coeff[i]) > tol * 1.1:
                err += 1
        print("Mismatches: %d" % err)
        assert err == 0
    else:
        # Cross-check the new API against the legacy weighted entry point.
        G = graph_file.get_graph()
        res_w_jaccard = cugraph.jaccard_w(G, vertex_pair=M_cu[[SRC_COL, DST_COL]])
        res_w_jaccard = res_w_jaccard.sort_values(
            [VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]
        ).reset_index(drop=True)
        res_jaccard = cudf.DataFrame()
        res_jaccard[VERTEX_PAIR_FIRST_COL] = cu_src
        res_jaccard[VERTEX_PAIR_SECOND_COL] = cu_dst
        res_jaccard[JACCARD_COEFF_COL] = cu_coeff
        assert_frame_equal(
            res_w_jaccard, res_jaccard, check_dtype=False, check_like=True
        )
        # FIXME: compare weighted jaccard results against resultset api
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_directed_graph_check(read_csv, use_weight):
    """cugraph.jaccard must raise ValueError when given a directed graph."""
    _, M, _ = read_csv

    cu_M = cudf.DataFrame()
    cu_M[SRC_COL] = cudf.Series(M[SRC_COL])
    cu_M[DST_COL] = cudf.Series(M[DST_COL])
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])

    G1 = cugraph.Graph(directed=True)
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(cu_M, source=SRC_COL, destination=DST_COL, weight=weight)

    vertex_pair = cu_M[[SRC_COL, DST_COL]]
    vertex_pair = vertex_pair[:5]
    with pytest.raises(ValueError):
        # Fix: pass use_weight by keyword -- positionally the third parameter
        # of cugraph.jaccard is not guaranteed to be use_weight across API
        # versions, so a bare positional boolean could bind elsewhere.
        cugraph.jaccard(G1, vertex_pair, use_weight=use_weight)
@pytest.mark.sg
def test_nx_jaccard_time(read_csv, gpubenchmark):
    """Benchmark the pure-networkx Jaccard path for comparison numbers."""
    _, M, _ = read_csv
    networkx_call(M, gpubenchmark)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [netscience])
@pytest.mark.parametrize("use_weight", [False, True])
def test_jaccard_edgevals(gpubenchmark, graph_file, use_weight):
    """Jaccard on a dataset with real edge values matches networkx
    (unweighted path only).

    Fix: use the parametrized ``graph_file`` instead of the hard-coded
    ``netscience`` module reference, so the parametrize list can actually
    grow additional datasets.
    """
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    M_cu = utils.read_csv_file(dataset_path)
    cu_src, cu_dst, cu_coeff = cugraph_call(
        gpubenchmark, graph_file, input_df=M_cu, use_weight=use_weight
    )
    if not use_weight:
        nx_src, nx_dst, nx_coeff = networkx_call(M)
        # Count coefficients that disagree beyond tolerance.
        tol = 1.0e-06
        assert len(cu_coeff) == len(nx_coeff)
        err = sum(1 for c, n in zip(cu_coeff, nx_coeff) if abs(c - n) > tol * 1.1)
        print("Mismatches: %d" % err)
        assert err == 0
    else:
        # FIXME: compare results against resultset api
        pass
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_jaccard_two_hop(read_csv, use_weight):
    """Two-hop Jaccard coefficients agree with networkx (unweighted path)."""
    _, M, graph_file = read_csv
    nx_graph = nx.from_pandas_edgelist(
        M, source=SRC_COL, target=DST_COL, create_using=nx.Graph()
    )
    cu_graph = graph_file.get_graph(ignore_weights=not use_weight)
    compare_jaccard_two_hop(cu_graph, nx_graph, use_weight)
@pytest.mark.sg
def test_jaccard_nx(read_csv):
    """cugraph's nx-compat jaccard_coefficient returns fewer pairs than the
    full all-pairs networkx computation."""
    M_cu, M, _ = read_csv
    Gnx = nx.from_pandas_edgelist(
        M, source=SRC_COL, target=DST_COL, create_using=nx.Graph()
    )
    nx_j = nx.jaccard_coefficient(Gnx)
    # NOTE: every element is a (u, v, p) 3-tuple, so key=len is a constant
    # key; only len(nv_js) is used below, so the ordering is irrelevant.
    nv_js = sorted(nx_j, key=len, reverse=True)
    ebunch = M_cu.rename(
        columns={SRC_COL: VERTEX_PAIR_FIRST_COL, DST_COL: VERTEX_PAIR_SECOND_COL}
    )
    ebunch = ebunch[[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]]
    cg_j = cugraph.jaccard_coefficient(Gnx, ebunch=ebunch)
    assert len(nv_js) > len(cg_j)
    # FIXME: Nx does a full all-pair Jaccard.
    # cuGraph does a limited 1-hop Jaccard
    # assert nx_j == cg_j
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
@pytest.mark.parametrize("use_weight", [False, True])
def test_jaccard_multi_column(graph_file, use_weight):
    """Multi-column vertex identifiers must yield the same coefficients as the
    equivalent single-column graph."""
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)

    cu_M = cudf.DataFrame()
    cu_M[MULTI_COL_SRC_0_COL] = cudf.Series(M[SRC_COL])
    cu_M[MULTI_COL_DST_0_COL] = cudf.Series(M[DST_COL])
    # Second identifier column is a deterministic offset of the first, so the
    # multi-column graph is isomorphic to the single-column one.
    cu_M[MULTI_COL_SRC_1_COL] = cu_M[MULTI_COL_SRC_0_COL] + 1000
    cu_M[MULTI_COL_DST_1_COL] = cu_M[MULTI_COL_DST_0_COL] + 1000
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])

    G1 = cugraph.Graph()
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(
        cu_M,
        source=[MULTI_COL_SRC_0_COL, MULTI_COL_SRC_1_COL],
        destination=[MULTI_COL_DST_0_COL, MULTI_COL_DST_1_COL],
        weight=weight,
    )
    vertex_pair = cu_M[
        [
            MULTI_COL_SRC_0_COL,
            MULTI_COL_SRC_1_COL,
            MULTI_COL_DST_0_COL,
            MULTI_COL_DST_1_COL,
        ]
    ]
    # Only the first few pairs are needed for the comparison.
    vertex_pair = vertex_pair[:5]
    df_multi_col_res = cugraph.jaccard(G1, vertex_pair)
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(
        cu_M, source=MULTI_COL_SRC_0_COL, destination=MULTI_COL_DST_0_COL, weight=weight
    )
    df_single_col_res = cugraph.jaccard(
        G2, vertex_pair[[MULTI_COL_SRC_0_COL, MULTI_COL_DST_0_COL]]
    )
    # Calculating mismatch
    # Multi-column results name the pair columns "0_src"/"0_dst" etc.
    actual = df_multi_col_res.sort_values("0_src").reset_index()
    expected = df_single_col_res.sort_values(VERTEX_PAIR_FIRST_COL).reset_index()
    assert_series_equal(actual[JACCARD_COEFF_COL], expected[JACCARD_COEFF_COL])
@pytest.mark.sg
def test_weighted_jaccard():
    """Requesting weighted Jaccard on a weightless graph raises ValueError."""
    G = UNDIRECTED_DATASETS[0].get_graph(ignore_weights=True)
    with pytest.raises(ValueError):
        cugraph.jaccard(G, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_overlap_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import cugraph
import dask_cudf
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Collect garbage before each test so allocations left over from the
    previous test do not interfere."""
    gc.collect()
# Parameter axes for the fixture product below.
# NOTE: Overlap only supports undirected graphs, so directed stays [False].
IS_DIRECTED = [False]
HAS_VERTEX_PAIR = [True, False]
IS_WEIGHTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]
# The tuple order here must match the key order zipped in the input_combo
# fixture below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    (IS_DIRECTED, "directed"),
    (HAS_VERTEX_PAIR, "has_vertex_pair"),
    (IS_WEIGHTED, "is_weighted"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """Return the current fixture-param tuple as a labelled dict."""
    labels = ("graph_file", "directed", "has_vertex_pair", "is_weighted")
    return dict(zip(labels, request.param))
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the overlap algo.
    (based on cuGraph overlap) which can be used for validation.
    """
    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    has_vertex_pair = input_combo["has_vertex_pair"]
    is_weighted = input_combo["is_weighted"]
    # Single-GPU reference graph; edge weights loaded only for weighted runs.
    G = utils.generate_cugraph_graph_from_file(
        input_data_path, directed=directed, edgevals=is_weighted
    )
    if has_vertex_pair:
        # Sample random vertices from the graph and compute the two_hop_neighbors
        # with those seeds
        k = random.randint(1, 10)
        seeds = random.sample(range(G.number_of_vertices()), k)
        vertex_pair = G.get_two_hop_neighbors(start_vertices=seeds)
    else:
        # None lets cugraph.overlap derive the pairs itself.
        vertex_pair = None
    input_combo["vertex_pair"] = vertex_pair
    # Single-GPU reference result used by the MG test for validation.
    sg_cugraph_overlap = cugraph.overlap(
        G, input_combo["vertex_pair"], use_weight=is_weighted
    )
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    input_combo["sg_cugraph_results"] = sg_cugraph_overlap
    chunksize = dcg.get_chunksize(input_data_path)
    # Multi-GPU edgelist: dask_cudf partitions the csv by chunksize bytes.
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value" if is_weighted else None,
        renumber=True,
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
# is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_dask_mg_overlap(dask_client, benchmark, input_expected_output):
    """Multi-GPU overlap must match the single-GPU reference within 1e-5."""
    dg = input_expected_output["MGGraph"]
    use_weight = input_expected_output["is_weighted"]
    result_overlap = benchmark(
        dcg.overlap, dg, input_expected_output["vertex_pair"], use_weight=use_weight
    )
    # Materialize the lazy dask result and normalize ordering for comparison.
    result_overlap = (
        result_overlap.compute()
        .sort_values(["first", "second"])
        .reset_index(drop=True)
        .rename(columns={"overlap_coeff": "mg_cugraph_overlap_coeff"})
    )
    expected_output = (
        input_expected_output["sg_cugraph_results"]
        .sort_values(["first", "second"])
        .reset_index(drop=True)
    )
    # Update the dask cugraph overlap results with sg cugraph results for easy
    # comparison using cuDF DataFrame methods.
    result_overlap["sg_cugraph_overlap_coeff"] = expected_output["overlap_coeff"]
    # Two one-sided queries implement an absolute-difference tolerance check.
    overlap_coeff_diffs1 = result_overlap.query(
        "mg_cugraph_overlap_coeff - sg_cugraph_overlap_coeff > 0.00001"
    )
    overlap_coeff_diffs2 = result_overlap.query(
        "mg_cugraph_overlap_coeff - sg_cugraph_overlap_coeff < -0.00001"
    )
    assert len(overlap_coeff_diffs1) == 0
    assert len(overlap_coeff_diffs2) == 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_prediction/test_sorensen.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cugraph.datasets import netscience
from cudf.testing import assert_series_equal
from cudf.testing.testing import assert_frame_equal
# Column names produced by utils.read_csv_file / read_csv_for_nx.
SRC_COL = "0"
DST_COL = "1"
# Column names in cugraph.sorensen result frames.
VERTEX_PAIR_FIRST_COL = "first"
VERTEX_PAIR_SECOND_COL = "second"
SORENSEN_COEFF_COL = "sorensen_coeff"
EDGE_ATT_COL = "weight"
# Synthetic column names for the multi-column vertex-identifier tests.
MULTI_COL_SRC_0_COL = "src_0"
MULTI_COL_DST_0_COL = "dst_0"
MULTI_COL_SRC_1_COL = "src_1"
MULTI_COL_DST_1_COL = "dst_1"
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Trigger garbage collection before every test to free lingering
    allocations from earlier tests."""
    gc.collect()
# =============================================================================
# Helper functions
# =============================================================================
def compare_sorensen_two_hop(G, Gnx, use_weight=False):
    """
    Compute both cugraph and nx sorensen after extracting the two hop neighbors
    from G and compare both results.

    Unweighted: validate against networkx Jaccard converted to Sorensen.
    Weighted: validate against the legacy ``cugraph.sorensen_w`` entry point.
    """
    pairs = (
        G.get_two_hop_neighbors()
        .sort_values([VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL])
        .reset_index(drop=True)
    )
    # print(f'G = {G.edgelist.edgelist_df}')
    df = cugraph.sorensen(G, pairs)
    df = df.sort_values(by=[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]).reset_index(
        drop=True
    )
    if not use_weight:
        nx_pairs = list(pairs.to_records(index=False))
        # print(f'nx_pairs = {len(nx_pairs)}')
        preds = nx.jaccard_coefficient(Gnx, nx_pairs)
        # FIXME: Use known correct values of Sorensen for few graphs,
        # hardcode it and compare to Cugraph Sorensen to get a more robust test
        # Conversion from Networkx Jaccard to Sorensen
        # No networkX equivalent
        # Sorensen S relates to Jaccard J by S = 2J / (1 + J).
        nx_coeff = list(map(lambda x: (2 * x[2]) / (1 + x[2]), preds))
        assert len(nx_coeff) == len(df)
        for i in range(len(df)):
            diff = abs(nx_coeff[i] - df[SORENSEN_COEFF_COL].iloc[i])
            assert diff < 1.0e-6
    else:
        # FIXME: compare results against resultset api
        res_w_sorensen = cugraph.sorensen_w(G, vertex_pair=pairs)
        res_w_sorensen = res_w_sorensen.sort_values(
            [VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]
        ).reset_index(drop=True)
        assert_frame_equal(res_w_sorensen, df, check_dtype=False, check_like=True)
def cugraph_call(benchmark_callable, graph_file, input_df=None, use_weight=False):
    """Benchmark cugraph.sorensen on ``graph_file``'s graph.

    Parameters
    ----------
    benchmark_callable : callable
        Benchmark wrapper invoked as ``benchmark_callable(fn, *args, **kw)``.
    graph_file : dataset object
        Provides ``get_graph(ignore_weights=...)``.
    input_df : cudf.DataFrame, optional
        One-hop edge list used as the vertex pairs; when absent an empty pair
        frame is passed instead.
    use_weight : bool
        Load edge weights into the graph.

    Returns
    -------
    tuple of numpy arrays ``(first, second, sorensen_coeff)`` sorted by pair.
    """
    # Fix: removed the dead ``G = cugraph.Graph()`` assignment that was
    # immediately overwritten by the line below.
    G = graph_file.get_graph(ignore_weights=not use_weight)
    # If no vertex_pair is passed as input, 'cugraph.sorensen' will
    # compute the 'sorensen_similarity' with the two_hop_neighbor of the
    # entire graph while nx compute with the one_hop_neighbor. For better
    # comparison, get the one_hop_neighbor of the entire graph for
    # 'cugraph.sorensen' and pass it as vertex_pair
    if isinstance(input_df, cudf.DataFrame):
        vertex_pair = input_df.rename(
            columns={SRC_COL: VERTEX_PAIR_FIRST_COL, DST_COL: VERTEX_PAIR_SECOND_COL}
        )
        vertex_pair = vertex_pair[[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]]
    else:
        # Empty pair frame typed to match the graph's vertex dtype.
        vertex_pair = cudf.DataFrame(
            columns=[VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL],
            dtype=G.edgelist.edgelist_df["src"].dtype,
        )
    # cugraph Sorensen Call
    # NOTE(review): unlike the jaccard test helper, ``use_weight`` is NOT
    # forwarded to cugraph.sorensen here -- the flag only controls graph
    # construction. Callers compare the result against unweighted networkx
    # in both cases, so forwarding it would change test expectations;
    # confirm intent before changing.
    df = benchmark_callable(cugraph.sorensen, G, vertex_pair=vertex_pair)
    df = df.sort_values([VERTEX_PAIR_FIRST_COL, VERTEX_PAIR_SECOND_COL]).reset_index(
        drop=True
    )
    return (
        df[VERTEX_PAIR_FIRST_COL].to_numpy(),
        df[VERTEX_PAIR_SECOND_COL].to_numpy(),
        df[SORENSEN_COEFF_COL].to_numpy(),
    )
def networkx_call(M, benchmark_callable=None):
    """Compute Sorensen coefficients via nx.jaccard_coefficient over every
    (symmetrized) edge of M.

    Returns three parallel lists: sources, destinations, coefficients.
    """
    srcs = M[SRC_COL]
    dsts = M[DST_COL]
    # Symmetrize the edge list, de-duplicate (dict preserves insertion
    # order), then sort for deterministic output.
    raw_pairs = []
    for s, d in zip(srcs, dsts):
        raw_pairs.append((s, d))
        raw_pairs.append((d, s))
    edges = sorted(dict.fromkeys(raw_pairs))
    # in NVGRAPH tests we read as CSR and feed as CSC, so here we doing this
    # explicitly
    print("Format conversion ... ")
    Gnx = nx.from_pandas_edgelist(
        M,
        source=SRC_COL,
        target=DST_COL,
        edge_attr=EDGE_ATT_COL,
        create_using=nx.Graph(),
    )
    # Networkx Jaccard Call
    print("Solving... ")
    if benchmark_callable is None:
        preds = nx.jaccard_coefficient(Gnx, edges)
    else:
        preds = benchmark_callable(nx.jaccard_coefficient, Gnx, edges)
    src, dst, coeff = [], [], []
    for u, v, p in preds:
        src.append(u)
        dst.append(v)
        # Convert Jaccard J to Sorensen 2J/(1+J); nx has no direct Sorensen.
        coeff.append((2 * p) / (1 + p))
    return src, dst, coeff
# =============================================================================
# Pytest Fixtures
# =============================================================================
@pytest.fixture(scope="module", params=UNDIRECTED_DATASETS)
def read_csv(request):
    """Read the current dataset once per module for networkx and cugraph."""
    graph_file = request.param
    dataset_path = graph_file.get_path()
    M_cu = utils.read_csv_file(dataset_path)
    M = utils.read_csv_for_nx(dataset_path)
    return M_cu, M, graph_file
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_sorensen(gpubenchmark, read_csv, use_weight):
    """Compare cugraph Sorensen coefficients against the networkx-derived
    reference.

    NOTE(review): the nx comparison runs even when use_weight=True; this only
    holds because cugraph_call does not forward use_weight to
    cugraph.sorensen (see helper) -- confirm before tightening.
    """
    M_cu, M, graph_file = read_csv
    cu_src, cu_dst, cu_coeff = cugraph_call(
        gpubenchmark, graph_file, input_df=M_cu, use_weight=use_weight
    )
    nx_src, nx_dst, nx_coeff = networkx_call(M)
    # Calculating mismatch
    err = 0
    tol = 1.0e-06
    assert len(cu_coeff) == len(nx_coeff)
    for i in range(len(cu_coeff)):
        if abs(cu_coeff[i] - nx_coeff[i]) > tol * 1.1:
            err += 1
    print("Mismatches: %d" % err)
    assert err == 0
@pytest.mark.sg
def test_nx_sorensen_time(gpubenchmark, read_csv):
    """Benchmark the networkx-based Sorensen path for comparison numbers."""
    _, M, _ = read_csv
    networkx_call(M, gpubenchmark)
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_directed_graph_check(read_csv, use_weight):
    """cugraph.sorensen must raise ValueError when given a directed graph."""
    _, M, _ = read_csv

    cu_M = cudf.DataFrame()
    cu_M[SRC_COL] = cudf.Series(M[SRC_COL])
    cu_M[DST_COL] = cudf.Series(M[DST_COL])
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])

    G1 = cugraph.Graph(directed=True)
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(cu_M, source=SRC_COL, destination=DST_COL, weight=weight)

    vertex_pair = cu_M[[SRC_COL, DST_COL]]
    vertex_pair = vertex_pair[:5]
    with pytest.raises(ValueError):
        # Fix: pass use_weight by keyword -- positionally the third parameter
        # of cugraph.sorensen is not guaranteed to be use_weight across API
        # versions, so a bare positional boolean could bind elsewhere.
        cugraph.sorensen(G1, vertex_pair, use_weight=use_weight)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", [netscience])
@pytest.mark.parametrize("use_weight", [False, True])
@pytest.mark.skip(reason="Skipping because this datasets is unrenumbered")
def test_sorensen_edgevals(gpubenchmark, graph_file, use_weight):
    """Sorensen on a dataset with real edge values matches the networkx
    reference.

    Fix: use the parametrized ``graph_file`` instead of the hard-coded
    ``netscience`` module reference, so the parametrize list can actually
    grow additional datasets.
    """
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    M_cu = utils.read_csv_file(dataset_path)
    cu_src, cu_dst, cu_coeff = cugraph_call(
        gpubenchmark, graph_file, input_df=M_cu, use_weight=use_weight
    )
    nx_src, nx_dst, nx_coeff = networkx_call(M)
    # Count coefficients that disagree beyond tolerance.
    tol = 1.0e-06
    assert len(cu_coeff) == len(nx_coeff)
    err = sum(1 for c, n in zip(cu_coeff, nx_coeff) if abs(c - n) > tol * 1.1)
    print("Mismatches: %d" % err)
    assert err == 0
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_sorensen_two_hop(read_csv, use_weight):
    """Two-hop Sorensen coefficients agree with the reference computation."""
    _, M, graph_file = read_csv
    nx_graph = nx.from_pandas_edgelist(
        M, source=SRC_COL, target=DST_COL, create_using=nx.Graph()
    )
    cu_graph = graph_file.get_graph(ignore_weights=not use_weight)
    compare_sorensen_two_hop(cu_graph, nx_graph, use_weight=use_weight)
@pytest.mark.sg
@pytest.mark.parametrize("use_weight", [False, True])
def test_sorensen_two_hop_edge_vals(read_csv, use_weight):
    """Two-hop Sorensen with edge values loaded into the nx graph."""
    _, M, graph_file = read_csv
    nx_graph = nx.from_pandas_edgelist(
        M,
        source=SRC_COL,
        target=DST_COL,
        edge_attr=EDGE_ATT_COL,
        create_using=nx.Graph(),
    )
    cu_graph = graph_file.get_graph(ignore_weights=not use_weight)
    compare_sorensen_two_hop(cu_graph, nx_graph, use_weight=use_weight)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", UNDIRECTED_DATASETS)
@pytest.mark.parametrize("use_weight", [False, True])
def test_sorensen_multi_column(graph_file, use_weight):
    """Multi-column vertex identifiers must yield the same coefficients as the
    equivalent single-column graph."""
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)

    cu_M = cudf.DataFrame()
    cu_M[MULTI_COL_SRC_0_COL] = cudf.Series(M[SRC_COL])
    cu_M[MULTI_COL_DST_0_COL] = cudf.Series(M[DST_COL])
    # Second identifier column is a deterministic offset of the first, so the
    # multi-column graph is isomorphic to the single-column one.
    cu_M[MULTI_COL_SRC_1_COL] = cu_M[MULTI_COL_SRC_0_COL] + 1000
    cu_M[MULTI_COL_DST_1_COL] = cu_M[MULTI_COL_DST_0_COL] + 1000
    if use_weight:
        cu_M[EDGE_ATT_COL] = cudf.Series(M[EDGE_ATT_COL])

    G1 = cugraph.Graph()
    weight = EDGE_ATT_COL if use_weight else None
    G1.from_cudf_edgelist(
        cu_M,
        source=[MULTI_COL_SRC_0_COL, MULTI_COL_SRC_1_COL],
        destination=[MULTI_COL_DST_0_COL, MULTI_COL_DST_1_COL],
        weight=weight,
    )
    vertex_pair = cu_M[
        [
            MULTI_COL_SRC_0_COL,
            MULTI_COL_SRC_1_COL,
            MULTI_COL_DST_0_COL,
            MULTI_COL_DST_1_COL,
        ]
    ]
    # Only the first few pairs are needed for the comparison.
    vertex_pair = vertex_pair[:5]
    df_multi_col_res = cugraph.sorensen(G1, vertex_pair)
    G2 = cugraph.Graph()
    G2.from_cudf_edgelist(
        cu_M, source=MULTI_COL_SRC_0_COL, destination=MULTI_COL_DST_0_COL, weight=weight
    )
    df_single_col_res = cugraph.sorensen(
        G2, vertex_pair[[MULTI_COL_SRC_0_COL, MULTI_COL_DST_0_COL]]
    )
    # Calculating mismatch
    # Multi-column results name the pair columns "0_src"/"0_dst" etc.
    actual = df_multi_col_res.sort_values("0_src").reset_index()
    expected = df_single_col_res.sort_values(VERTEX_PAIR_FIRST_COL).reset_index()
    assert_series_equal(actual[SORENSEN_COEFF_COL], expected[SORENSEN_COEFF_COL])
@pytest.mark.sg
def test_weighted_sorensen():
    """Requesting weighted Sorensen on a weightless graph raises ValueError."""
    G = UNDIRECTED_DATASETS[0].get_graph(ignore_weights=True)
    with pytest.raises(ValueError):
        cugraph.sorensen(G, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_analysis/test_hits.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import networkx as nx
import pandas as pd
import cudf
import cugraph
from cugraph.testing import utils, UNDIRECTED_DATASETS
from cugraph.datasets import email_Eu_core, karate
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Garbage-collect before each test so prior tests' allocations are
    released first."""
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# Datasets exercised by the HITS tests; tuple order must match the key order
# zipped in the input_combo fixture below.
datasets = UNDIRECTED_DATASETS + [email_Eu_core]
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([50], "max_iter"),
    # FIXME: Changed this from 1.0e-6 to 1.0e-5. NX defaults to
    # FLOAT64 computation, cuGraph C++ defaults to whatever the edge weight
    # is, cugraph python defaults that to FLOAT32. Does not converge at
    # 1e-6 for larger graphs and FLOAT32.
    ([1.0e-5], "tol"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """Return the current (graph_file, max_iter, tol) combination as a dict."""
    names = ("graph_file", "max_iter", "tol")
    return {name: value for name, value in zip(names, request.param)}
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns a dictionary containing all input params required to
    run a HITS algo and the corresponding expected result (based on NetworkX
    HITS) which can be used for validation.
    """
    # Only run Nx to compute the expected result if it is not already present
    # in the dictionary. This allows separate Nx-only tests that may have run
    # previously on the same input_combo to save their results for re-use
    # elsewhere.
    if "nxResults" not in input_combo:
        dataset_path = input_combo["graph_file"].get_path()
        # HITS reference is computed on a directed nx graph.
        Gnx = utils.generate_nx_graph_from_file(dataset_path, directed=True)
        nxResults = nx.hits(
            Gnx, input_combo["max_iter"], input_combo["tol"], normalized=True
        )
        input_combo["nxResults"] = nxResults
    return input_combo
# =============================================================================
# Tests
# =============================================================================
@pytest.mark.sg
def test_nx_hits(benchmark, input_combo):
    """
    Run NetworkX HITS on the same input combinations as the cuGraph HITS
    tests, purely to collect comparison performance numbers.
    """
    dataset_path = input_combo["graph_file"].get_path()
    nx_graph = utils.generate_nx_graph_from_file(dataset_path, directed=True)
    hits_results = benchmark(
        nx.hits,
        nx_graph,
        input_combo["max_iter"],
        input_combo["tol"],
        normalized=True,
    )
    # Cache the Nx result so other tests sharing input_combo can reuse it
    # instead of re-running the same Nx call.
    input_combo["nxResults"] = hits_results
@pytest.mark.sg
def test_hits(benchmark, input_expected_output):
    """cuGraph HITS hubs/authorities must match NetworkX within tolerance."""
    graph_file = input_expected_output["graph_file"]
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=True))
    cugraph_hits = benchmark(
        cugraph.hits, G, input_expected_output["max_iter"], input_expected_output["tol"]
    )
    cugraph_hits = cugraph_hits.sort_values("vertex").reset_index(drop=True)
    # nx.hits returns (hubs_dict, authorities_dict) keyed by vertex.
    (nx_hubs, nx_authorities) = input_expected_output["nxResults"]
    # Update the cugraph HITS results with Nx results for easy comparison using
    # cuDF DataFrame methods.
    pdf = pd.DataFrame.from_dict(nx_hubs, orient="index").sort_index()
    cugraph_hits["nx_hubs"] = cudf.Series.from_pandas(pdf[0])
    pdf = pd.DataFrame.from_dict(nx_authorities, orient="index").sort_index()
    cugraph_hits["nx_authorities"] = cudf.Series.from_pandas(pdf[0])
    # Pairs of one-sided queries implement absolute-difference tolerance
    # checks (hubs at 1e-5, authorities at the looser 1e-4).
    hubs_diffs1 = cugraph_hits.query("hubs - nx_hubs > 0.00001")
    hubs_diffs2 = cugraph_hits.query("hubs - nx_hubs < -0.00001")
    authorities_diffs1 = cugraph_hits.query("authorities - nx_authorities > 0.0001")
    authorities_diffs2 = cugraph_hits.query("authorities - nx_authorities < -0.0001")
    assert len(hubs_diffs1) == 0
    assert len(hubs_diffs2) == 0
    assert len(authorities_diffs1) == 0
    assert len(authorities_diffs2) == 0
@pytest.mark.sg
def test_hits_transposed_false():
    """
    HITS on a graph created without store_transposed=True must emit a
    performance warning recommending the transposed layout.
    """
    G = karate.get_graph(create_using=cugraph.Graph(directed=True))

    # BUG FIX: this HITS test previously called cugraph.pagerank() and
    # matched the Pagerank warning text. It now exercises cugraph.hits()
    # with the HITS warning, mirroring the MG variant of this test.
    warning_msg = (
        "HITS expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=warning_msg):
        cugraph.hits(G)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_analysis/test_pagerank.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import time
import pytest
import numpy as np
import networkx as nx
import cudf
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
from cugraph.datasets import karate
print("Networkx version : {} ".format(nx.__version__))
def cudify(d):
    """Convert a {vertex: value} dict into a cuDF DataFrame with 'vertex'
    and 'values' columns; a None input is passed through unchanged."""
    if d is None:
        return None
    vertices = np.fromiter(d.keys(), dtype="int32")
    values = np.fromiter(d.values(), dtype="float32")
    return cudf.DataFrame({"vertex": vertices, "values": values})
def cugraph_call(G, max_iter, tol, alpha, personalization, nstart, pre_vtx_o_wgt):
    """Run cugraph.pagerank on G and return a list of (index, score) pairs
    ordered by vertex id.  Prints the elapsed wall-clock time."""
    start = time.time()
    df = cugraph.pagerank(
        G,
        alpha=alpha,
        max_iter=max_iter,
        tol=tol,
        personalization=personalization,
        precomputed_vertex_out_weight=pre_vtx_o_wgt,
        nstart=nstart,
    )
    elapsed = time.time() - start
    print("Cugraph Time : " + str(elapsed))

    # Order scores by vertex id and pair each with its positional index.
    df = df.sort_values("vertex").reset_index(drop=True)
    scores = df["pagerank"].to_numpy()
    return [(i, rank) for i, rank in enumerate(scores)]
# need a different function since the Nx version returns a dictionary
def nx_cugraph_call(G, max_iter, tol, alpha, personalization, nstart):
    """Run cugraph.pagerank on a NetworkX graph and return its dict result,
    printing the elapsed wall-clock time."""
    start = time.time()
    pr = cugraph.pagerank(
        G,
        alpha=alpha,
        max_iter=max_iter,
        tol=tol,
        personalization=personalization,
        nstart=nstart,
    )
    elapsed = time.time() - start
    print("Cugraph Time : " + str(elapsed))
    return pr
# The function selects personalization_perc% of accessible vertices in graph M
# and randomly assigns them personalization values
def networkx_call(Gnx, max_iter, tol, alpha, personalization_perc, nnz_vtx):
    """Run nx.pagerank with optional random personalization.

    Returns (pagerank dict, personalization dict or None).
    """
    personalization = None
    if personalization_perc != 0:
        # Choose personalization_perc% of the vertices at random and give
        # them normalized random personalization weights.
        count = int((nnz_vtx.size * personalization_perc) / 100.0)
        chosen = np.random.choice(nnz_vtx, min(nnz_vtx.size, count), replace=False)
        weights = np.random.random(chosen.size)
        weights = weights / sum(weights)
        personalization = dict(zip(chosen, weights))

    # Uniform starting guess over all nodes.
    z = {k: 1.0 / Gnx.number_of_nodes() for k in Gnx.nodes()}

    # Networkx Pagerank Call
    start = time.time()
    pr = nx.pagerank(
        Gnx,
        alpha=alpha,
        nstart=z,
        max_iter=max_iter * 2,
        tol=tol * 0.01,
        personalization=personalization,
    )
    elapsed = time.time() - start
    print("Networkx Time : " + str(elapsed))

    return pr, personalization
# =============================================================================
# Parameters
# =============================================================================
# Parameter grids consumed by the @pytest.mark.parametrize decorators below.
MAX_ITERATIONS = [500]  # solver iteration cap
TOLERANCE = [1.0e-06]  # convergence tolerance
ALPHA = [0.85]  # damping factor
PERSONALIZATION_PERC = [0, 10, 50]  # % of vertices given personalization values
HAS_GUESS = [0, 1]  # whether to seed pagerank with an initial guess (nstart)
HAS_PRECOMPUTED = [0, 1]  # whether to pass precomputed vertex out-weights
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Per-test hook: force a garbage-collection pass before each test."""
    gc.collect()
# FIXME: the default set of datasets includes an asymmetric directed graph
# (email-EU-core.csv), which currently produces different results between
# cugraph and Nx and fails that test. Investigate, resolve, and use
# utils.DATASETS instead.
#
# https://github.com/rapidsai/cugraph/issues/533
#
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
@pytest.mark.parametrize("tol", TOLERANCE)
@pytest.mark.parametrize("alpha", ALPHA)
@pytest.mark.parametrize("personalization_perc", PERSONALIZATION_PERC)
@pytest.mark.parametrize("has_guess", HAS_GUESS)
@pytest.mark.parametrize("has_precomputed_vertex_out_weight", HAS_PRECOMPUTED)
def test_pagerank(
    graph_file,
    max_iter,
    tol,
    alpha,
    personalization_perc,
    has_guess,
    has_precomputed_vertex_out_weight,
):
    """
    Compare cugraph.pagerank against nx.pagerank on the same directed,
    weighted graph, optionally exercising personalization, an initial guess
    (nstart) and precomputed vertex out-weights.
    """
    # NetworkX PageRank
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    # Vertices that actually appear in the edge list (columns "0" and "1").
    nnz_vtx = np.unique(M[["0", "1"]])
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )
    networkx_pr, networkx_prsn = networkx_call(
        Gnx, max_iter, tol, alpha, personalization_perc, nnz_vtx
    )
    cu_nstart = None
    pre_vtx_o_wgt = None
    if has_guess == 1:
        # Seed cuGraph with Nx's converged scores; far fewer iterations are
        # then needed for convergence.
        cu_nstart = cudify(networkx_pr)
        max_iter = 20
    cu_prsn = cudify(networkx_prsn)
    # cuGraph PageRank
    G = graph_file.get_graph(create_using=cugraph.Graph(directed=True))
    if has_precomputed_vertex_out_weight == 1:
        # Sum of outgoing edge weights per source vertex, supplied to
        # pagerank as precomputed_vertex_out_weight.
        df = G.view_edge_list()[["src", "wgt"]]
        pre_vtx_o_wgt = (
            df.groupby(["src"], as_index=False)
            .sum()
            .rename(columns={"src": "vertex", "wgt": "sums"})
        )
    cugraph_pr = cugraph_call(
        G, max_iter, tol, alpha, cu_prsn, cu_nstart, pre_vtx_o_wgt
    )
    # Calculating mismatch
    networkx_pr = sorted(networkx_pr.items(), key=lambda x: x[0])
    err = 0
    assert len(cugraph_pr) == len(networkx_pr)
    for i in range(len(cugraph_pr)):
        # NOTE(review): a score difference only counts as a mismatch when the
        # vertex ids also agree ('and') — confirm this is the intended
        # semantics rather than counting id misalignment too.
        if (
            abs(cugraph_pr[i][1] - networkx_pr[i][1]) > tol * 1.1
            and cugraph_pr[i][0] == networkx_pr[i][0]
        ):
            err = err + 1
    print("Mismatches:", err)
    # Allow up to 1% of vertices to disagree beyond the tolerance.
    assert err < (0.01 * len(cugraph_pr))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
@pytest.mark.parametrize("tol", TOLERANCE)
@pytest.mark.parametrize("alpha", ALPHA)
@pytest.mark.parametrize("personalization_perc", PERSONALIZATION_PERC)
@pytest.mark.parametrize("has_guess", HAS_GUESS)
def test_pagerank_nx(graph_file, max_iter, tol, alpha, personalization_perc, has_guess):
    """
    Compare cugraph.pagerank invoked directly on a NetworkX graph against
    nx.pagerank on the same (unweighted) directed graph.
    """
    # NetworkX PageRank (reference result + optional personalization dict)
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    nnz_vtx = np.unique(M[["0", "1"]])
    Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.DiGraph())
    networkx_pr, networkx_prsn = networkx_call(
        Gnx, max_iter, tol, alpha, personalization_perc, nnz_vtx
    )
    cu_nstart = None
    if has_guess == 1:
        # Seed with Nx's converged scores; fewer iterations are then needed.
        cu_nstart = cudify(networkx_pr)
        max_iter = 20
    cu_prsn = cudify(networkx_prsn)
    # cuGraph PageRank with Nx Graph
    cugraph_pr = nx_cugraph_call(Gnx, max_iter, tol, alpha, cu_prsn, cu_nstart)
    # Calculating mismatch: both results sorted by vertex id, scores compared
    # pairwise with a 1% allowed-mismatch budget.
    networkx_pr = sorted(networkx_pr.items(), key=lambda x: x[0])
    cugraph_pr = sorted(cugraph_pr.items(), key=lambda x: x[0])
    err = 0
    assert len(cugraph_pr) == len(networkx_pr)
    for i in range(len(cugraph_pr)):
        if (
            abs(cugraph_pr[i][1] - networkx_pr[i][1]) > tol * 1.1
            and cugraph_pr[i][0] == networkx_pr[i][0]
        ):
            err = err + 1
            # BUG FIX: previously printed the cuGraph score twice; show the
            # mismatched pair (cuGraph score vs Nx score) instead.
            print(f"{cugraph_pr[i][1]} and {networkx_pr[i][1]}")
    print("Mismatches:", err)
    assert err < (0.01 * len(cugraph_pr))
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
@pytest.mark.parametrize("max_iter", MAX_ITERATIONS)
@pytest.mark.parametrize("tol", TOLERANCE)
@pytest.mark.parametrize("alpha", ALPHA)
@pytest.mark.parametrize("personalization_perc", PERSONALIZATION_PERC)
@pytest.mark.parametrize("has_guess", HAS_GUESS)
@pytest.mark.parametrize("has_precomputed_vertex_out_weight", HAS_PRECOMPUTED)
def test_pagerank_multi_column(
    graph_file,
    max_iter,
    tol,
    alpha,
    personalization_perc,
    has_guess,
    has_precomputed_vertex_out_weight,
):
    """
    Exercise PageRank on a graph whose vertices are identified by two
    columns (vertex_0, vertex_1 = vertex_0 + 1000), validating against
    nx.pagerank on the equivalent single-column graph.
    """
    # NetworkX PageRank
    dataset_path = graph_file.get_path()
    M = utils.read_csv_for_nx(dataset_path)
    nnz_vtx = np.unique(M[["0", "1"]])
    Gnx = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph()
    )
    networkx_pr, networkx_prsn = networkx_call(
        Gnx, max_iter, tol, alpha, personalization_perc, nnz_vtx
    )
    cu_nstart = None
    pre_vtx_o_wgt = None
    if has_guess == 1:
        # Build a two-column nstart from the Nx scores; the second id column
        # is the first shifted by 1000.
        cu_nstart_temp = cudify(networkx_pr)
        # NOTE(review): the single-column test uses max_iter = 20 here;
        # confirm whether 100 is intentional for the multi-column variant.
        max_iter = 100
        cu_nstart = cudf.DataFrame()
        cu_nstart["vertex_0"] = cu_nstart_temp["vertex"]
        cu_nstart["vertex_1"] = cu_nstart["vertex_0"] + 1000
        cu_nstart["values"] = cu_nstart_temp["values"]
    # Personalization gets the same two-column vertex treatment.
    cu_prsn_temp = cudify(networkx_prsn)
    if cu_prsn_temp is not None:
        cu_prsn = cudf.DataFrame()
        cu_prsn["vertex_0"] = cu_prsn_temp["vertex"]
        cu_prsn["vertex_1"] = cu_prsn["vertex_0"] + 1000
        cu_prsn["values"] = cu_prsn_temp["values"]
    else:
        cu_prsn = cu_prsn_temp
    # Edge list with two source and two destination id columns.
    cu_M = cudf.DataFrame()
    cu_M["src_0"] = cudf.Series(M["0"])
    cu_M["dst_0"] = cudf.Series(M["1"])
    cu_M["src_1"] = cu_M["src_0"] + 1000
    cu_M["dst_1"] = cu_M["dst_0"] + 1000
    cu_M["weights"] = cudf.Series(M["weight"])
    cu_G = cugraph.Graph(directed=True)
    cu_G.from_cudf_edgelist(
        cu_M,
        source=["src_0", "src_1"],
        destination=["dst_0", "dst_1"],
        edge_attr="weights",
        store_transposed=True,
    )
    if has_precomputed_vertex_out_weight == 1:
        # Per-source-vertex sum of outgoing weights, keyed by both columns.
        df = cu_M[["src_0", "src_1", "weights"]]
        pre_vtx_o_wgt = (
            df.groupby(["src_0", "src_1"], as_index=False)
            .sum()
            .rename(columns={"weights": "sums"})
        )
    df = cugraph.pagerank(
        cu_G,
        alpha=alpha,
        max_iter=max_iter,
        tol=tol,
        personalization=cu_prsn,
        nstart=cu_nstart,
        precomputed_vertex_out_weight=pre_vtx_o_wgt,
    )
    # Multi-column results expose the primary id column as "0_vertex".
    cugraph_pr = []
    df = df.sort_values("0_vertex").reset_index(drop=True)
    pr_scores = df["pagerank"].to_numpy()
    for i, rank in enumerate(pr_scores):
        cugraph_pr.append((i, rank))
    # Calculating mismatch
    networkx_pr = sorted(networkx_pr.items(), key=lambda x: x[0])
    err = 0
    assert len(cugraph_pr) == len(networkx_pr)
    for i in range(len(cugraph_pr)):
        if (
            abs(cugraph_pr[i][1] - networkx_pr[i][1]) > tol * 1.1
            and cugraph_pr[i][0] == networkx_pr[i][0]
        ):
            err = err + 1
    print("Mismatches:", err)
    # Allow up to 1% of vertices to disagree beyond the tolerance.
    assert err < (0.01 * len(cugraph_pr))
@pytest.mark.sg
def test_pagerank_invalid_personalization_dtype():
    """
    PageRank must warn when the personalization columns' dtypes do not match
    the graph's edge-attribute and vertex dtypes.
    """
    dataset_path = karate.get_path()
    M = utils.read_csv_for_nx(dataset_path)

    cu_M = cudf.DataFrame(
        {
            "src": cudf.Series(M["0"]),
            "dst": cudf.Series(M["1"]),
            "weights": cudf.Series(M["weight"]),
        }
    )
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        cu_M,
        source="src",
        destination="dst",
        edge_attr="weights",
        store_transposed=True,
    )

    # Both columns deliberately use wider dtypes than the graph's: int64
    # vertex ids (graph uses int32) and float64 values (graph weights are
    # float32).
    personalization = cudf.DataFrame({"vertex": [17, 26], "values": [0.5, 0.75]})

    # cu_M["weights"] is float32 while personalization["values"] is float64;
    # the python layer should enforce matching types and raise this warning.
    warning_msg = (
        "PageRank requires 'personalization' values to match the "
        "graph's 'edge_attr' type. edge_attr type is: "
        "float32 and got 'personalization' values "
        "of type: float64."
    )
    with pytest.warns(UserWarning, match=warning_msg):
        cugraph.pagerank(G, personalization=personalization)

    # cu_M["src"] is int32 while personalization["vertex"] is int64; the
    # python layer should enforce matching types and raise this warning.
    warning_msg = (
        "PageRank requires 'personalization' vertex to match the "
        "graph's 'vertex' type. input graph's vertex type is: "
        "int32 and got 'personalization' vertex "
        "of type: int64."
    )
    with pytest.warns(UserWarning, match=warning_msg):
        cugraph.pagerank(G, personalization=personalization)
@pytest.mark.sg
def test_pagerank_transposed_false():
    """Pagerank warns when the graph was not created with store_transposed=True."""
    G = karate.get_graph(create_using=cugraph.Graph(directed=True))

    expected_warning = (
        "Pagerank expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        cugraph.pagerank(G)
@pytest.mark.sg
def test_pagerank_non_convergence():
    """
    Exercise fail_on_nonconvergence: raising vs returning a (df, converged)
    tuple, for both plain and personalized PageRank.
    """
    G = karate.get_graph(create_using=cugraph.Graph(directed=True))

    # A single iteration cannot converge; with fail_on_nonconvergence=True
    # this must raise.
    with pytest.raises(cugraph.exceptions.FailedToConvergeError):
        cugraph.pagerank(G, max_iter=1, fail_on_nonconvergence=True)

    # Same non-convergence, but reported via the returned flag instead.
    (df, converged) = cugraph.pagerank(G, max_iter=1, fail_on_nonconvergence=False)
    assert type(df) is cudf.DataFrame
    assert type(converged) is bool
    assert converged is False

    # The default max_iter value should allow convergence for this graph.
    (df, converged) = cugraph.pagerank(G, fail_on_nonconvergence=False)
    assert type(df) is cudf.DataFrame
    assert type(converged) is bool
    assert converged is True

    # Repeat the same three checks for personalized pagerank.
    personalization = cudf.DataFrame()
    personalization["vertex"] = [17, 26]
    personalization["values"] = [0.5, 0.75]

    with pytest.raises(cugraph.exceptions.FailedToConvergeError):
        cugraph.pagerank(
            G, max_iter=1, personalization=personalization, fail_on_nonconvergence=True
        )

    (df, converged) = cugraph.pagerank(
        G, max_iter=1, personalization=personalization, fail_on_nonconvergence=False
    )
    assert type(df) is cudf.DataFrame
    assert type(converged) is bool
    assert converged is False

    (df, converged) = cugraph.pagerank(
        G, personalization=personalization, fail_on_nonconvergence=False
    )
    assert type(df) is cudf.DataFrame
    assert type(converged) is bool
    assert converged is True
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_analysis/test_pagerank_mg.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
import cudf
import cugraph
import cugraph.dask as dcg
import dask_cudf
from cugraph.testing import utils
from cugraph.dask.common.mg_utils import is_single_gpu
from cugraph.testing.utils import RAPIDS_DATASET_ROOT_DIR_PATH
# The function selects personalization_perc% of accessible vertices in graph M
# and randomly assigns them personalization values
def personalize(vertices, personalization_perc):
    """
    Randomly select personalization_perc% of `vertices` and assign them
    normalized random personalization values.

    Returns a tuple (cu_personalization, personalization):
      * cu_personalization: cudf.DataFrame with 'vertex'/'values' columns,
        or None when personalization_perc == 0
      * personalization: plain {vertex: value} dict, or None when
        personalization_perc == 0
    """
    # BUG FIX: cu_personalization was previously only assigned inside the
    # `if` block, so calling with personalization_perc == 0 raised a
    # NameError at the return statement.
    cu_personalization = None
    personalization = None
    if personalization_perc != 0:
        personalization = {}
        nnz_vtx = vertices.values_host
        personalization_count = int((nnz_vtx.size * personalization_perc) / 100.0)
        nnz_vtx = np.random.choice(
            nnz_vtx, min(nnz_vtx.size, personalization_count), replace=False
        )
        # Random weights, normalized to sum to 1.
        nnz_val = np.random.random(nnz_vtx.size)
        nnz_val = nnz_val / sum(nnz_val)
        for vtx, val in zip(nnz_vtx, nnz_val):
            personalization[vtx] = val
        k = np.fromiter(personalization.keys(), dtype="int32")
        v = np.fromiter(personalization.values(), dtype="float32")
        cu_personalization = cudf.DataFrame({"vertex": k, "values": v})
    return cu_personalization, personalization
def create_distributed_karate_graph(store_transposed=True):
    """Build a directed MG (dask_cudf) Graph from the karate dataset."""
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()

    edge_ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=dcg.get_chunksize(input_data_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(
        edge_ddf, "src", "dst", store_transposed=store_transposed
    )
    return dg
# =============================================================================
# Parameters
# =============================================================================
# Parameter grids consumed by the @pytest.mark.parametrize decorators below.
PERSONALIZATION_PERC = [0, 10, 50]  # % of vertices given personalization values
IS_DIRECTED = [True, False]  # run against directed and undirected graphs
HAS_GUESS = [0, 1]  # whether to seed pagerank with an initial guess (nstart)
HAS_PRECOMPUTED = [0, 1]  # whether to pass precomputed vertex out-weights
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Per-test hook: force a garbage-collection pass before each test."""
    gc.collect()
@pytest.mark.mg
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
@pytest.mark.parametrize("personalization_perc", PERSONALIZATION_PERC)
@pytest.mark.parametrize("directed", IS_DIRECTED)
@pytest.mark.parametrize("has_precomputed_vertex_out_weight", HAS_PRECOMPUTED)
@pytest.mark.parametrize("has_guess", HAS_GUESS)
def test_dask_mg_pagerank(
    dask_client,
    personalization_perc,
    directed,
    has_precomputed_vertex_out_weight,
    has_guess,
):
    """
    Compare MG (dask) PageRank against SG cuGraph PageRank on the karate
    graph, optionally with personalization, precomputed vertex out-weights
    and an initial guess (nstart).
    """
    input_data_path = (RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)

    # Load the same edgelist both distributed (dask_cudf) and local (cudf).
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    df = cudf.read_csv(
        input_data_path,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    g = cugraph.Graph(directed=directed)
    g.from_cudf_edgelist(df, "src", "dst", "value")

    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, "src", "dst", "value", store_transposed=True)

    personalization = None
    pre_vtx_o_wgt = None
    nstart = None
    max_iter = 100
    # (BUG FIX: removed a stray no-op statement that merely evaluated
    # `has_precomputed_vertex_out_weight` and discarded the result)
    if personalization_perc != 0:
        personalization, _ = personalize(g.nodes(), personalization_perc)
    if has_precomputed_vertex_out_weight == 1:
        # Per-source-vertex sum of outgoing edge weights.
        df = df[["src", "value"]]
        pre_vtx_o_wgt = (
            df.groupby(["src"], as_index=False)
            .sum()
            .rename(columns={"src": "vertex", "value": "sums"})
        )
    if has_guess == 1:
        # Seed with SG's converged scores so fewer iterations are needed.
        nstart = cugraph.pagerank(g, personalization=personalization, tol=1e-6).rename(
            columns={"pagerank": "values"}
        )
        max_iter = 20

    expected_pr = cugraph.pagerank(
        g,
        personalization=personalization,
        precomputed_vertex_out_weight=pre_vtx_o_wgt,
        max_iter=max_iter,
        tol=1e-6,
        nstart=nstart,
    )
    result_pr = dcg.pagerank(
        dg,
        personalization=personalization,
        precomputed_vertex_out_weight=pre_vtx_o_wgt,
        max_iter=max_iter,
        tol=1e-6,
        nstart=nstart,
    )
    result_pr = result_pr.compute()

    # Compare SG vs MG scores per vertex; no vertex may differ by more than
    # tol * 1.1.
    err = 0
    tol = 1.0e-05
    assert len(expected_pr) == len(result_pr)
    compare_pr = expected_pr.merge(result_pr, on="vertex", suffixes=["_local", "_dask"])
    for i in range(len(compare_pr)):
        diff = abs(
            compare_pr["pagerank_local"].iloc[i] - compare_pr["pagerank_dask"].iloc[i]
        )
        if diff > tol * 1.1:
            err = err + 1
    assert err == 0
@pytest.mark.mg
def test_pagerank_invalid_personalization_dtype(dask_client):
    """
    MG PageRank must warn when the personalization values' dtype does not
    match the graph's edge-attribute dtype.
    """
    input_data_path = (utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()

    edge_ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=dcg.get_chunksize(input_data_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(
        edge_ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        store_transposed=True,
    )

    # float64 personalization values vs the graph's float32 edge attribute
    # must trigger the dtype-mismatch warning.
    personalization_vec = cudf.DataFrame({"vertex": [17, 26], "values": [0.5, 0.75]})

    warning_msg = (
        "PageRank requires 'personalization' values to match the "
        "graph's 'edge_attr' type. edge_attr type is: "
        "float32 and got 'personalization' values "
        "of type: float64."
    )
    with pytest.warns(UserWarning, match=warning_msg):
        dcg.pagerank(dg, personalization=personalization_vec)
@pytest.mark.mg
def test_dask_mg_pagerank_transposed_false(dask_client):
    """MG Pagerank warns when the graph was not created with store_transposed=True."""
    dg = create_distributed_karate_graph(store_transposed=False)

    expected_warning = (
        "Pagerank expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        dcg.pagerank(dg)
@pytest.mark.mg
def test_pagerank_non_convergence(dask_client):
    """
    Exercise fail_on_nonconvergence for MG PageRank: raising vs returning a
    (ddf, converged) tuple, for both plain and personalized variants.
    """
    dg = create_distributed_karate_graph()

    # A single iteration cannot converge; with fail_on_nonconvergence=True
    # this must raise.
    with pytest.raises(cugraph.exceptions.FailedToConvergeError):
        dcg.pagerank(dg, max_iter=1, fail_on_nonconvergence=True)

    # Same non-convergence, but reported via the returned flag instead.
    (ddf, converged) = dcg.pagerank(dg, max_iter=1, fail_on_nonconvergence=False)
    assert type(ddf) is dask_cudf.DataFrame
    assert type(converged) is bool
    assert converged is False

    # The default max_iter value should allow convergence for this graph.
    (ddf, converged) = dcg.pagerank(dg, fail_on_nonconvergence=False)
    assert type(ddf) is dask_cudf.DataFrame
    assert type(converged) is bool
    assert converged is True

    # Repeat the same three checks for personalized pagerank.
    personalization = cudf.DataFrame()
    personalization["vertex"] = [17, 26]
    personalization["values"] = [0.5, 0.75]

    with pytest.raises(cugraph.exceptions.FailedToConvergeError):
        dcg.pagerank(
            dg, max_iter=1, personalization=personalization, fail_on_nonconvergence=True
        )

    (ddf, converged) = dcg.pagerank(
        dg, max_iter=1, personalization=personalization, fail_on_nonconvergence=False
    )
    assert type(ddf) is dask_cudf.DataFrame
    assert type(converged) is bool
    assert converged is False

    (ddf, converged) = dcg.pagerank(
        dg, personalization=personalization, fail_on_nonconvergence=False
    )
    assert type(ddf) is dask_cudf.DataFrame
    assert type(converged) is bool
    assert converged is True
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/link_analysis/test_hits_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import dask_cudf
import cugraph
import cugraph.dask as dcg
from cugraph.testing import utils
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Per-test hook: force a garbage-collection pass before each test."""
    gc.collect()
IS_DIRECTED = [True, False]
# =============================================================================
# Pytest fixtures
# =============================================================================
# Undirected test datasets plus one directed graph (email-Eu-core).
datasets = utils.DATASETS_UNDIRECTED + [
    utils.RAPIDS_DATASET_ROOT_DIR_PATH / "email-Eu-core.csv"
]
# Cartesian product of all parameter values, consumed by the input_combo
# fixture below.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"),
    ([50], "max_iter"),
    ([1.0e-6], "tol"),
    (IS_DIRECTED, "directed"),
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Expose the current parameter combination as a dict keyed by parameter
    name, for use in tests and other parameterized fixtures.
    """
    keys = ("graph_file", "max_iter", "tol", "directed")
    return dict(zip(keys, request.param))
@pytest.fixture(scope="module")
def input_expected_output(input_combo):
    """
    This fixture returns the inputs and expected results from the HITS algo.
    (based on cuGraph HITS) which can be used for validation.
    """
    input_data_path = input_combo["graph_file"]
    directed = input_combo["directed"]
    # Compute the single-GPU reference result first.
    G = utils.generate_cugraph_graph_from_file(input_data_path, directed=directed)
    sg_cugraph_hits = cugraph.hits(G, input_combo["max_iter"], input_combo["tol"])
    # Save the results back to the input_combo dictionary to prevent redundant
    # cuGraph runs. Other tests using the input_combo fixture will look for
    # them, and if not present they will have to re-run the same cuGraph call.
    sg_cugraph_hits = sg_cugraph_hits.sort_values("vertex").reset_index(drop=True)
    input_combo["sg_cugraph_results"] = sg_cugraph_hits
    # Build the distributed (dask) graph the MG test will run against.
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        edge_attr="value",
        renumber=True,
        # store_transposed=True avoids the performance warning HITS emits
        # for non-transposed graphs.
        store_transposed=True,
    )
    input_combo["MGGraph"] = dg
    return input_combo
# =============================================================================
# Tests
# =============================================================================
# @pytest.mark.skipif(
#     is_single_gpu(), reason="skipping MG testing on Single GPU system"
# )
@pytest.mark.mg
def test_dask_mg_hits(dask_client, benchmark, input_expected_output):
    """
    Run MG (dask) HITS and validate hubs/authorities scores against the SG
    cuGraph result computed by the input_expected_output fixture.
    """
    dg = input_expected_output["MGGraph"]

    result_hits = benchmark(
        dcg.hits, dg, input_expected_output["tol"], input_expected_output["max_iter"]
    )
    result_hits = (
        result_hits.compute()
        .sort_values("vertex")
        .reset_index(drop=True)
        .rename(
            columns={"hubs": "mg_cugraph_hubs", "authorities": "mg_cugraph_authorities"}
        )
    )

    expected_output = (
        input_expected_output["sg_cugraph_results"]
        .sort_values("vertex")
        .reset_index(drop=True)
    )

    # Attach the SG columns so cuDF query() can compare MG vs SG directly.
    result_hits["sg_cugraph_hubs"] = expected_output["hubs"]
    result_hits["sg_cugraph_authorities"] = expected_output["authorities"]

    # Hubs must agree within 1e-5 and authorities within 1e-4, both ways.
    for predicate in (
        "mg_cugraph_hubs - sg_cugraph_hubs > 0.00001",
        "mg_cugraph_hubs - sg_cugraph_hubs < -0.00001",
        "mg_cugraph_authorities - sg_cugraph_authorities > 0.0001",
        "mg_cugraph_authorities - sg_cugraph_authorities < -0.0001",
    ):
        assert len(result_hits.query(predicate)) == 0
@pytest.mark.mg
def test_dask_mg_hits_transposed_false(dask_client):
    """MG HITS warns when the graph was not created with store_transposed=True."""
    input_data_path = (utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()

    edge_ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=dcg.get_chunksize(input_data_path),
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )

    dg = cugraph.Graph(directed=True)
    dg.from_dask_cudf_edgelist(edge_ddf, "src", "dst", store_transposed=False)

    expected_warning = (
        "HITS expects the 'store_transposed' "
        "flag to be set to 'True' for optimal performance during "
        "the graph creation"
    )
    with pytest.warns(UserWarning, match=expected_warning):
        dcg.hits(dg)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/structure/test_multigraph.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
import networkx as nx
import cugraph
from cugraph.testing import utils, DEFAULT_DATASETS
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    """Per-test hook: force a garbage-collection pass before each test."""
    gc.collect()
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_multigraph(graph_file):
    """
    Edge/node counts and edge lists of a cugraph MultiGraph must match the
    equivalent NetworkX MultiDiGraph.
    """
    # FIXME: Migrate to new test fixtures for Graph setup once available
    G = graph_file.get_graph(create_using=cugraph.MultiGraph(directed=True))
    nxM = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        nxM,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.MultiDiGraph(),
    )

    assert G.number_of_edges() == Gnx.number_of_edges()
    assert G.number_of_nodes() == Gnx.number_of_nodes()

    # Normalize both edge lists to the same column names, rounded weights
    # and (source, target) ordering before comparing.
    cuedges = cugraph.to_pandas_edgelist(G)
    cuedges.rename(
        columns={"src": "source", "dst": "target", "wgt": "weight"}, inplace=True
    )
    cuedges["weight"] = cuedges["weight"].round(decimals=3)
    nxedges = nx.to_pandas_edgelist(Gnx).astype(
        dtype={"source": "int32", "target": "int32", "weight": "float32"}
    )
    cuedges = cuedges.sort_values(by=["source", "target"]).reset_index(drop=True)
    nxedges = nxedges.sort_values(by=["source", "target"]).reset_index(drop=True)
    nxedges["weight"] = nxedges["weight"].round(decimals=3)
    assert nxedges.equals(cuedges[["source", "target", "weight"]])
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_Graph_from_MultiGraph(graph_file):
    """
    Construct simple Graph/DiGraph views from MultiGraphs (cuGraph and Nx)
    and compare edge counts.
    """
    # FIXME: Migrate to new test fixtures for Graph setup once available
    GM = graph_file.get_graph(create_using=cugraph.MultiGraph())
    dataset_path = graph_file.get_path()
    nxM = utils.read_csv_for_nx(dataset_path, read_weights_in_sp=True)
    GnxM = nx.from_pandas_edgelist(
        nxM,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.MultiGraph(),
    )
    # Undirected case: collapsing multi-edges must yield the same edge count.
    G = cugraph.Graph(GM)
    Gnx = nx.Graph(GnxM)
    assert Gnx.number_of_edges() == G.number_of_edges()
    GdM = graph_file.get_graph(create_using=cugraph.MultiGraph(directed=True))
    # NOTE(review): the directed comparison builds the Nx side with
    # create_using=nx.MultiGraph() (undirected) and then converts it via
    # nx.DiGraph() below — confirm this is intended rather than
    # create_using=nx.MultiDiGraph().
    GnxdM = nx.from_pandas_edgelist(
        nxM,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.MultiGraph(),
    )
    Gd = cugraph.Graph(GdM, directed=True)
    Gnxd = nx.DiGraph(GnxdM)
    assert Gnxd.number_of_edges() == Gd.number_of_edges()
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", DEFAULT_DATASETS)
def test_multigraph_sssp(graph_file):
    """SSSP distances on a cugraph MultiGraph must match NetworkX Dijkstra."""
    # FIXME: Migrate to new test fixtures for Graph setup once available
    G = graph_file.get_graph(create_using=cugraph.MultiGraph(directed=True))
    cu_paths = cugraph.sssp(G, 0)
    # Drop unreachable vertices (distance equals the dtype's max sentinel).
    unreachable = np.finfo(cu_paths["distance"].dtype).max
    cu_paths = cu_paths[cu_paths["distance"] != unreachable]

    nxM = utils.read_csv_for_nx(graph_file.get_path(), read_weights_in_sp=True)
    Gnx = nx.from_pandas_edgelist(
        nxM,
        source="0",
        target="1",
        edge_attr="weight",
        create_using=nx.MultiDiGraph(),
    )
    nx_paths = nx.single_source_dijkstra_path_length(Gnx, 0)

    cu_dist = cu_paths.sort_values(by="vertex")["distance"].to_numpy()
    nx_dist = [dist for _, dist in sorted(nx_paths.items())]
    assert (cu_dist == nx_dist).all()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/structure/test_graph_mg.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import pytest
import copy
import cupy
import cudf
import cugraph
import dask_cudf
import cugraph.dask as dcg
import cugraph.dask.comms.comms as Comms
from cugraph.testing import utils
from dask.distributed import wait
from pylibcugraph import ResourceHandle
from pylibcugraph import bfs as pylibcugraph_bfs
from cudf.testing.testing import assert_frame_equal
from cugraph.dask.traversal.bfs import convert_to_cudf
from cugraph.dask.common.input_utils import get_distributed_data
from pylibcugraph.testing.utils import gen_fixture_params_product
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Force a GC pass before every test so device memory held by graphs from
    # the previous test is released.
    gc.collect()
# =============================================================================
# Pytest fixtures
# =============================================================================
# Graph directedness values to parametrize over.
IS_DIRECTED = [True, False]
# Input CSV datasets: undirected plus unrenumbered files.
datasets = utils.DATASETS_UNDIRECTED + utils.DATASETS_UNRENUMBERED
# Cross product of (dataset, directedness) consumed by the input_combo fixture.
fixture_params = gen_fixture_params_product(
    (datasets, "graph_file"), (IS_DIRECTED, "directed")
)
@pytest.fixture(scope="module", params=fixture_params)
def input_combo(request):
    """
    Simply return the current combination of params as a dictionary for use in
    tests or other parameterized fixtures.

    Keys: "graph_file", "directed", "input_df" (dask_cudf edgelist DataFrame),
    "MGGraph" (multi-GPU cugraph.Graph built from that DataFrame).
    """
    parameters = dict(zip(("graph_file", "directed"), request.param))
    input_data_path = parameters["graph_file"]
    directed = parameters["directed"]
    # Size each dask partition to roughly one chunk per worker.
    chunksize = dcg.get_chunksize(input_data_path)
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    parameters["input_df"] = ddf
    dg = cugraph.Graph(directed=directed)
    dg.from_dask_cudf_edgelist(ddf, source="src", destination="dst", edge_attr="value")
    parameters["MGGraph"] = dg
    return parameters
@pytest.mark.mg
def test_nodes_functionality(dask_client, input_combo):
    """nodes() must return exactly the unique src/dst ids of the edgelist."""
    G = input_combo["MGGraph"]
    ddf = input_combo["input_df"]
    # Series has no attributed sort_values so convert the Series
    # to a DataFrame
    nodes = G.nodes().to_frame()
    col_name = nodes.columns[0]
    nodes = nodes.rename(columns={col_name: "result_nodes"})
    result_nodes = nodes.compute().sort_values("result_nodes").reset_index(drop=True)
    # Expected node set: deduplicated union of the src and dst columns.
    expected_nodes = (
        dask_cudf.concat([ddf["src"], ddf["dst"]])
        .drop_duplicates()
        .to_frame()
        .sort_values(0)
    )
    expected_nodes = expected_nodes.compute().reset_index(drop=True)
    result_nodes["expected_nodes"] = expected_nodes[0]
    # Any row where the two columns differ is a mismatch.
    compare = result_nodes.query("result_nodes != expected_nodes")
    assert len(compare) == 0
@pytest.mark.mg
def test_has_node_functionality(dask_client, input_combo):
    """Check has_node() for both existing and non-existing vertex ids."""
    graph = input_combo["MGGraph"]
    all_vertices = graph.nodes().compute()
    # Draw a random handful of known-good vertices from the graph.
    sample_size = random.randint(1, 20)
    sampled = all_vertices.sample(sample_size).reset_index(drop=True)
    print("nodes are \n", sampled)
    assert graph.has_node(sampled)
    # One past the largest vertex id can never be present.
    missing = all_vertices.max() + 1
    assert graph.has_node(missing) is False
@pytest.mark.mg
def test_create_mg_graph(dask_client, input_combo):
    """Build an MG graph and validate it by running BFS through pylibcugraph."""
    G = input_combo["MGGraph"]
    ddf = input_combo["input_df"]
    df = ddf.compute()
    # ensure graph exists
    assert G._plc_graph is not None
    # ensure graph is partitioned correctly
    assert len(G._plc_graph) == len(dask_client.has_what())
    # BFS seed vertex 1, distributed across the same partitions as the graph.
    start = dask_cudf.from_cudf(cudf.Series([1], dtype="int32"), len(G._plc_graph))
    if G.renumbered:
        start = G.lookup_internal_vertex_id(start, None)
    data_start = get_distributed_data(start)
    # Submit one pylibcugraph BFS per worker against that worker's local
    # graph partition and seed partition.
    res = [
        dask_client.submit(
            lambda sID, mg_graph_x, st_x: pylibcugraph_bfs(
                ResourceHandle(Comms.get_handle(sID).getHandle()),
                mg_graph_x,
                st_x,
                False,
                0,
                True,
                False,
            ),
            Comms.get_session_id(),
            G._plc_graph[w],
            data_start.worker_to_parts[w][0],
            workers=[w],
        )
        for w in Comms.get_workers()
    ]
    wait(res)
    cudf_result = [dask_client.submit(convert_to_cudf, cp_arrays) for cp_arrays in res]
    wait(cudf_result)
    result_dist = dask_cudf.from_delayed(cudf_result)
    # Map internal ids back to the original vertex ids before comparing.
    if G.renumbered:
        result_dist = G.unrenumber(result_dist, "vertex")
        result_dist = G.unrenumber(result_dist, "predecessor")
        result_dist = result_dist.fillna(-1)
    result_dist = result_dist.compute()
    # Reference result: single-GPU BFS over the same edge list.
    g = cugraph.Graph(directed=G.properties.directed)
    g.from_cudf_edgelist(df, "src", "dst")
    expected_dist = cugraph.bfs(g, cudf.Series([1], dtype="int32"))
    compare_dist = expected_dist.merge(
        result_dist, on="vertex", suffixes=["_local", "_dask"]
    )
    # Count vertices whose SG and MG distances disagree.
    err = 0
    for i in range(len(compare_dist)):
        if (
            compare_dist["distance_local"].iloc[i]
            != compare_dist["distance_dask"].iloc[i]
        ):
            err = err + 1
    assert err == 0
@pytest.mark.mg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_create_graph_with_edge_ids(dask_client, graph_file):
    """Edge ids/types must be rejected on an undirected MG Graph and
    accepted on a directed one."""
    el = utils.read_csv_file(graph_file)
    el["id"] = cupy.random.permutation(len(el))
    el["id"] = el["id"].astype(el["1"].dtype)
    # Random edge types in [1, 4].  cupy.random.random_integers is deprecated
    # (mirroring NumPy); randint's upper bound is exclusive, so randint(1, 5)
    # produces the same inclusive [1, 4] range.
    el["etype"] = cupy.random.randint(1, 5, size=len(el))
    el["etype"] = el["etype"].astype("int32")

    num_workers = len(Comms.get_workers())
    el = dask_cudf.from_cudf(el, npartitions=num_workers)

    # Undirected graphs cannot carry edge ids/types.
    with pytest.raises(ValueError):
        G = cugraph.Graph()
        G.from_dask_cudf_edgelist(
            el,
            source="0",
            destination="1",
            edge_attr=["2", "id", "etype"],
        )

    # Directed graphs accept (weight, id, type) edge attributes.
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        el,
        source="0",
        destination="1",
        edge_attr=["2", "id", "etype"],
    )
@pytest.mark.mg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_create_graph_with_edge_ids_check_renumbering(dask_client, graph_file):
    """Round-trip a multi-column renumbered MG edgelist back to its input."""
    el = utils.read_csv_file(graph_file)
    el = el.rename(columns={"0": "0_src", "1": "0_dst", "2": "value"})
    # A second vertex column per endpoint forces multi-column renumbering.
    el["1_src"] = el["0_src"] + 1000
    el["1_dst"] = el["0_dst"] + 1000
    el["edge_id"] = cupy.random.permutation(len(el))
    el["edge_id"] = el["edge_id"].astype(el["1_dst"].dtype)
    # Random edge types in [1, 4]; randint(1, 5) replaces the deprecated
    # cupy.random.random_integers(4, ...) with the same inclusive range.
    el["edge_type"] = cupy.random.randint(1, 5, size=len(el))
    el["edge_type"] = el["edge_type"].astype("int32")

    num_workers = len(Comms.get_workers())
    el = dask_cudf.from_cudf(el, npartitions=num_workers)

    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(
        el,
        source=["0_src", "1_src"],
        destination=["0_dst", "1_dst"],
        edge_attr=["value", "edge_id", "edge_type"],
    )
    assert G.renumbered is True
    renumbered_df = G.edgelist.edgelist_df
    unrenumbered_df = G.unrenumber(renumbered_df, "renumbered_src")
    unrenumbered_df = G.unrenumber(unrenumbered_df, "renumbered_dst")
    unrenumbered_df.columns = unrenumbered_df.columns.str.replace(r"renumbered_", "")
    # After unrenumbering, the edge list must match the original input
    # (ignoring row order and dtypes).
    assert_frame_equal(
        el.compute().sort_values(by=["0_src", "0_dst"]).reset_index(drop=True),
        unrenumbered_df.compute()
        .sort_values(by=["0_src", "0_dst"])
        .reset_index(drop=True),
        check_dtype=False,
        check_like=True,
    )
@pytest.mark.mg
def test_graph_repartition(dask_client):
    """get_distributed_data must coalesce many partitions to one per worker."""
    input_data_path = (utils.RAPIDS_DATASET_ROOT_DIR_PATH / "karate.csv").as_posix()
    print(f"dataset={input_data_path}")
    chunksize = dcg.get_chunksize(input_data_path)
    num_workers = len(Comms.get_workers())
    ddf = dask_cudf.read_csv(
        input_data_path,
        chunksize=chunksize,
        delimiter=" ",
        names=["src", "dst", "value"],
        dtype=["int32", "int32", "float32"],
    )
    # Deliberately over-partition, then verify redistribution collapses the
    # data back to a single future per worker.
    more_partitions = num_workers * 100
    ddf = ddf.repartition(npartitions=more_partitions)
    ddf = get_distributed_data(ddf)
    num_futures = len(ddf.worker_to_parts.values())
    assert num_futures == num_workers
@pytest.mark.mg
def test_mg_graph_serializable(dask_client, input_combo):
    """An MG Graph must survive publish/get through the Dask scheduler."""
    # NOTE(review): Client.publish_dataset is deprecated in newer distributed
    # releases -- confirm against the pinned dask/distributed version.
    G = input_combo["MGGraph"]
    dask_client.publish_dataset(shared_g=G)
    shared_g = dask_client.get_dataset("shared_g")
    assert type(shared_g) == type(G)
    assert G.number_of_vertices() == shared_g.number_of_vertices()
    assert G.number_of_edges() == shared_g.number_of_edges()
    # cleanup
    dask_client.unpublish_dataset("shared_g")
@pytest.mark.mg
def test_mg_graph_copy():
    """A deep copy of a MultiGraph must preserve the concrete graph type."""
    original = cugraph.MultiGraph(directed=True)
    duplicate = copy.deepcopy(original)
    assert type(original) == type(duplicate)
@pytest.mark.mg
@pytest.mark.parametrize("random_state", [42, None])
@pytest.mark.parametrize("num_vertices", [5, None])
def test_mg_select_random_vertices(
    dask_client, input_combo, random_state, num_vertices
):
    """Every vertex returned by select_random_vertices must exist in G."""
    G = input_combo["MGGraph"]
    if num_vertices is None:
        # Select all vertices
        num_vertices = len(G.nodes())
    sampled_vertices = G.select_random_vertices(random_state, num_vertices).compute()
    original_vertices_df = cudf.DataFrame()
    sampled_vertices_df = cudf.DataFrame()
    sampled_vertices_df["sampled_vertices"] = sampled_vertices
    original_vertices_df["original_vertices"] = G.nodes().compute()
    # Inner join keeps only sampled vertices that are real graph vertices; if
    # none were invented, the join has one row per sampled vertex.
    join = sampled_vertices_df.merge(
        original_vertices_df, left_on="sampled_vertices", right_on="original_vertices"
    )
    assert len(join) == len(sampled_vertices)
@pytest.mark.mg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize(
    "edge_props",
    [
        ["edge_id", "edge_type", "weight"],
        ["edge_id", "edge_type"],
        ["edge_type", "weight"],
        ["edge_id"],
        ["weight"],
    ],
)
def test_graph_creation_edge_properties(dask_client, graph_file, edge_props):
    """Smoke test: MG graph creation succeeds for each edge-property subset."""
    df = utils.read_csv_file(graph_file)
    df["edge_id"] = cupy.arange(len(df), dtype="int32")
    df["edge_type"] = cupy.int32(3)
    df["weight"] = 0.5
    df = dask_cudf.from_cudf(df, npartitions=2)
    # Pass each property through a same-named keyword, e.g. edge_id="edge_id".
    prop_keys = {k: k for k in edge_props}
    G = cugraph.Graph(directed=True)
    G.from_dask_cudf_edgelist(df, source="0", destination="1", **prop_keys)
@pytest.mark.mg
@pytest.mark.parametrize("directed", [True, False])
@pytest.mark.parametrize("renumber", [True, False])
@pytest.mark.parametrize("graph_file", datasets)
def test_graph_creation_properties(dask_client, graph_file, directed, renumber):
    """SG and MG graphs built from the same edgelist must agree.

    Compares vertex/edge counts and the full edge-list view of a single-GPU
    graph against its multi-GPU counterpart.  The ``mg`` marker was missing,
    which excluded this dask_client-based test from ``-m mg`` runs even
    though every other MG test in this file carries it.
    """
    srcCol = "src"
    dstCol = "dst"
    wgtCol = "wgt"
    df = cudf.read_csv(
        graph_file,
        delimiter=" ",
        names=[srcCol, dstCol, wgtCol],
        dtype=["int32", "int32", "float32"],
        header=None,
    )
    ddf = dask_cudf.from_cudf(df, npartitions=2)

    if renumber:
        # trigger renumbering by passing a list of vertex column
        srcCol = [srcCol]
        dstCol = [dstCol]
        vertexCol = srcCol + dstCol
    else:
        vertexCol = [srcCol, dstCol]

    sG = cugraph.Graph(directed=directed)
    mG = cugraph.Graph(directed=directed)
    sG.from_cudf_edgelist(df, source=srcCol, destination=dstCol, edge_attr=wgtCol)
    mG.from_dask_cudf_edgelist(ddf, source=srcCol, destination=dstCol, edge_attr=wgtCol)

    # Normalize both edge-list views to the same columns and row order.
    columns = vertexCol.copy()
    columns.append(wgtCol)
    sG_edgelist_view = (
        sG.view_edge_list()
        .sort_values(by=vertexCol)
        .reset_index(drop=True)
        .loc[:, columns]
    )
    mG_edgelist_view = (
        mG.view_edge_list()
        .compute()
        .sort_values(by=vertexCol)
        .reset_index(drop=True)
        .loc[:, columns]
    )

    assert sG.number_of_nodes() == mG.number_of_nodes()
    assert sG.number_of_edges() == mG.number_of_edges()
    assert_frame_equal(sG_edgelist_view, mG_edgelist_view, check_dtype=False)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/structure/test_convert_matrix.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import pytest
import numpy as np
import networkx as nx
import cugraph
from cugraph.testing import utils
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Reclaim memory left over from previous tests before each test runs.
    gc.collect()
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_to_from_pandas(graph_file):
    """Round-trip a graph through pandas adjacency and edge-list forms,
    comparing cugraph against NetworkX at each step."""
    # Read in the graph
    M = utils.read_csv_for_nx(graph_file, read_weights_in_sp=True)
    # create a NetworkX DiGraph and convert to pandas adjacency
    nxG = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph
    )
    nx_pdf = nx.to_pandas_adjacency(nxG)
    nx_pdf = nx_pdf[sorted(nx_pdf.columns)]
    nx_pdf.sort_index(inplace=True)
    # create a cugraph Directed Graph and convert to pandas adjacency
    cuG = cugraph.from_pandas_edgelist(
        M,
        source="0",
        destination="1",
        edge_attr="weight",
        create_using=cugraph.Graph(directed=True),
    )
    cu_pdf = cugraph.to_pandas_adjacency(cuG)
    cu_pdf = cu_pdf[sorted(cu_pdf.columns)]
    cu_pdf.sort_index(inplace=True)
    # Compare pandas adjacency list
    assert nx_pdf.equals(cu_pdf)
    # Convert pandas adjacency list to graph
    new_nxG = nx.from_pandas_adjacency(nx_pdf, create_using=nx.DiGraph)
    new_cuG = cugraph.from_pandas_adjacency(
        cu_pdf, create_using=cugraph.Graph(directed=True)
    )
    # Compare pandas edgelist
    exp_pdf = nx.to_pandas_edgelist(new_nxG)
    res_pdf = cugraph.to_pandas_edgelist(new_cuG)
    # Normalize column names and row order before the equality check.
    exp_pdf = exp_pdf.rename(
        columns={"source": "src", "target": "dst", "weight": "weights"}
    )
    exp_pdf = exp_pdf.sort_values(by=["src", "dst"]).reset_index(drop=True)
    res_pdf = res_pdf.sort_values(by=["src", "dst"]).reset_index(drop=True)
    res_pdf = res_pdf[["src", "dst", "weights"]]
    assert exp_pdf.equals(res_pdf)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_from_to_numpy(graph_file):
    """Round-trip a directed graph through a numpy array.

    The original test ran a second, "matrix" pass that was byte-identical to
    the array pass (same `nx.to_numpy_array`/`cugraph.to_numpy_array` calls)
    and copy-pasted the edge-list comparison three times; the duplicate pass
    is removed and the comparison factored into a local helper.
    """

    def _assert_same_edges(nx_graph, cu_graph):
        # Compare NetworkX and cugraph edge lists after normalizing the
        # column names and the row order.
        exp_pdf = nx.to_pandas_edgelist(nx_graph)
        exp_pdf = exp_pdf.rename(
            columns={"source": "src", "target": "dst", "weight": "weights"}
        )
        exp_pdf = exp_pdf.sort_values(by=["src", "dst"]).reset_index(drop=True)
        res_pdf = cugraph.to_pandas_edgelist(cu_graph)
        res_pdf = res_pdf.sort_values(by=["src", "dst"]).reset_index(drop=True)
        res_pdf = res_pdf[["src", "dst", "weights"]]
        assert exp_pdf.equals(res_pdf)

    # Read in the graph
    M = utils.read_csv_for_nx(graph_file, read_weights_in_sp=True)

    # create NetworkX and cugraph Directed Graph
    nxG = nx.from_pandas_edgelist(
        M, source="0", target="1", edge_attr="weight", create_using=nx.DiGraph
    )
    cuG = cugraph.from_pandas_edgelist(
        M,
        source="0",
        destination="1",
        edge_attr="weight",
        create_using=cugraph.Graph(directed=True),
    )

    # convert graphs to numpy arrays, using cugraph's node order for both so
    # the arrays are directly comparable
    nparray_nx = nx.to_numpy_array(nxG, nodelist=cuG.nodes().values_host)
    nparray_cu = cugraph.to_numpy_array(cuG)
    assert np.array_equal(nparray_nx, nparray_cu)

    # Create graphs from the numpy arrays and verify their edges match
    new_nxG = nx.from_numpy_array(nparray_nx, create_using=nx.DiGraph)
    new_cuG = cugraph.from_numpy_array(
        nparray_cu, create_using=cugraph.Graph(directed=True)
    )
    _assert_same_edges(new_nxG, new_cuG)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_from_edgelist(graph_file):
    """
    Build graphs via cugraph.from_edgelist() from both a cudf and a pandas
    DataFrame of the same CSV and verify the resulting edge lists are equal.
    """
    cu_df = utils.read_csv_file(graph_file)
    pd_df = utils.read_csv_for_nx(graph_file)
    G1 = cugraph.from_edgelist(cu_df, source="0", destination="1")
    G2 = cugraph.from_edgelist(pd_df, source="0", destination="1")
    assert G1.EdgeList == G2.EdgeList
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_from_adjlist(graph_file):
    """
    Compare the resulting Graph objs from cugraph.from_adjlist() calls of both
    a cudf and pandas DataFrame and ensure the results are equal.
    """
    G = utils.generate_cugraph_graph_from_file(graph_file, directed=True)
    (cu_offsets, cu_indices, cu_vals) = G.view_adj_list()
    pd_offsets = cu_offsets.to_pandas()
    pd_indices = cu_indices.to_pandas()
    if cu_vals is not None:
        pd_vals = cu_vals.to_pandas()
    else:
        pd_vals = None
    # FIXME: should mixing types be allowed?
    # Mixing cudf/pandas inputs, or an invalid create_using, must raise.
    with pytest.raises(TypeError):
        G1 = cugraph.from_adjlist(cu_offsets, pd_indices)
    with pytest.raises(TypeError):
        G1 = cugraph.from_adjlist(cu_offsets, cu_indices, cu_vals, create_using=33)
    G1 = cugraph.from_adjlist(
        cu_offsets, cu_indices, cu_vals, create_using=cugraph.Graph(directed=True)
    )
    G2 = cugraph.from_adjlist(
        pd_offsets, pd_indices, pd_vals, create_using=cugraph.Graph(directed=True)
    )
    assert G1.AdjList == G2.AdjList
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/structure/test_graph.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import time
import pytest
import pandas as pd
import scipy
import networkx as nx
import cupy
import cudf
import cugraph
from cugraph.testing import utils
from cudf.testing import assert_series_equal
from cudf.testing.testing import assert_frame_equal
# MG
import dask_cudf
import cugraph.dask as dcg
from dask.distributed import Client
from dask_cuda import LocalCUDACluster
from pylibcugraph import ResourceHandle
from pylibcugraph import bfs as pylibcugraph_bfs
from cugraph.dask.traversal.bfs import convert_to_cudf
from cugraph.dask.common.mg_utils import is_single_gpu
# =============================================================================
# Pytest Setup / Teardown - called for each test function
# =============================================================================
def setup_function():
    # Free objects (and the GPU memory they hold) from the previous test.
    gc.collect()
def compare_series(series_1, series_2):
    """Assert the two series have the same length and element-wise values."""
    assert len(series_1) == len(series_2)
    # Line the series up side by side and keep only the mismatching rows.
    paired = cudf.DataFrame({"series_1": series_1, "series_2": series_2})
    mismatches = paired.query("series_1 != series_2")
    if len(mismatches) > 0:
        print("diffs:\n", mismatches)
    assert len(mismatches) == 0
# This function returns True if two graphs are identical (bijection between the
# vertices in one graph to the vertices in the other graph is identity AND two
# graphs are automorphic; no permutations of vertices are allowed).
def compare_graphs(nx_graph, cu_graph):
    # Rebuild a NetworkX DiGraph from the cugraph edge list so both sides can
    # be compared with NetworkX graph operations.
    edgelist_df = cu_graph.view_edge_list().reset_index(drop=True)
    df = cudf.DataFrame()
    df["source"] = edgelist_df["source"]
    df["target"] = edgelist_df["target"]
    if len(edgelist_df.columns) > 2:
        df["weight"] = edgelist_df["weights"]
        cu_to_nx_graph = nx.from_pandas_edgelist(
            df.to_pandas(),
            source="source",
            target="target",
            edge_attr=["weight"],
            create_using=nx.DiGraph(),
        )
    else:
        cu_to_nx_graph = nx.from_pandas_edgelist(
            df.to_pandas(), create_using=nx.DiGraph()
        )
    # first compare nodes
    ds0 = pd.Series(list(nx_graph.nodes)).sort_values(ignore_index=True)
    ds1 = pd.Series(list(cu_to_nx_graph.nodes)).sort_values(ignore_index=True)
    if not ds0.equals(ds1):
        print("ds0 != ds1")
        return False
    # second compare edges
    # nx.difference is asymmetric, so check both directions for extra edges.
    diff = nx.difference(nx_graph, cu_to_nx_graph)
    if diff.number_of_edges() > 0:
        print("diff.number_of_edges = ", diff.number_of_edges())
        return False
    diff = nx.difference(cu_to_nx_graph, nx_graph)
    if diff.number_of_edges() > 0:
        print("2: diff.number_of_edges = ", diff.number_of_edges())
        return False
    # Finally compare weights, edge by edge, when weights are present.
    if len(edgelist_df.columns) > 2:
        df0 = cudf.from_pandas(nx.to_pandas_edgelist(nx_graph))
        merge = df.merge(df0, on=["source", "target"], suffixes=("_cugraph", "_nx"))
        print("merge = \n", merge)
        print(merge[merge.weight_cugraph != merge.weight_nx])
        if not merge["weight_cugraph"].equals(merge["weight_nx"]):
            print("weights different")
            print(merge[merge.weight_cugraph != merge.weight_nx])
            return False
    return True
def find_two_paths(df, M):
    """Assert that every (first, second) row of df has a two-hop path in M.

    M is a CSR adjacency matrix: a path first -> mid -> second exists when
    some neighbor `mid` of `first` lists `second` among its own neighbors.
    """
    for row in range(len(df)):
        start = df["first"][row]
        end = df["second"][row]
        # Scan start's neighbors; for each, look for `end` among its
        # neighbors.  any() short-circuits on the first hit, like the
        # original nested break logic.
        foundPath = any(
            end in M.indices[M.indptr[mid] : M.indptr[mid + 1]]
            for mid in M.indices[M.indptr[start] : M.indptr[start + 1]]
        )
        if not foundPath:
            print("No path found between " + str(start) + " and " + str(end))
        assert foundPath
def has_pair(first_arr, second_arr, first, second):
    """Return True if (first, second) occurs at the same index i as
    (first_arr[i], second_arr[i]) in the two parallel arrays.

    Rewritten from a manual index loop to the idiomatic zip/any form; any()
    short-circuits on the first match exactly like the original early return.
    """
    return any(f == first and s == second for f, s in zip(first_arr, second_arr))
def check_all_two_hops(df, M):
    """Assert df contains every two-hop (start, end) pair implied by CSR M,
    excluding self-pairs."""
    firsts = df["first"].to_numpy()
    seconds = df["second"].to_numpy()
    vertex_count = len(M.indptr) - 1
    for start in range(vertex_count):
        # Walk each neighbor of `start`, then each neighbor-of-neighbor.
        for mid in M.indices[M.indptr[start] : M.indptr[start + 1]]:
            for end in M.indices[M.indptr[mid] : M.indptr[mid + 1]]:
                if start != end:
                    assert has_pair(firsts, seconds, start, end)
@pytest.mark.sg
def test_version():
    """Smoke test: the package exposes a version attribute."""
    _ = cugraph.__version__
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_add_edge_list_to_adj_list(graph_file):
    """view_adj_list() must match a scipy-built CSR of the same edges."""
    cu_M = utils.read_csv_file(graph_file)
    M = utils.read_csv_for_nx(graph_file)
    # Expected CSR built by scipy from the same (src, dst, weight) triples.
    N = max(max(M["0"]), max(M["1"])) + 1
    M = scipy.sparse.csr_matrix((M.weight, (M["0"], M["1"])), shape=(N, N))
    offsets_exp = M.indptr
    indices_exp = M.indices
    # cugraph add_egde_list to_adj_list call
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_M, source="0", destination="1", renumber=False)
    offsets_cu, indices_cu, values_cu = G.view_adj_list()
    compare_series(offsets_cu, offsets_exp)
    compare_series(indices_cu, indices_exp)
    # No edge attribute was supplied, so no values should be stored.
    assert values_cu is None
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_add_adj_list_to_edge_list(graph_file):
    """A graph built from a CSR adjacency must emit the matching COO edges."""
    Mnx = utils.read_csv_for_nx(graph_file)
    N = max(max(Mnx["0"]), max(Mnx["1"])) + 1
    Mcsr = scipy.sparse.csr_matrix((Mnx.weight, (Mnx["0"], Mnx["1"])), shape=(N, N))
    offsets = cudf.Series(Mcsr.indptr)
    indices = cudf.Series(Mcsr.indices)
    # Expected edge list: the COO form of the same scipy matrix.
    Mcoo = Mcsr.tocoo()
    sources_exp = cudf.Series(Mcoo.row)
    destinations_exp = cudf.Series(Mcoo.col)
    # cugraph add_adj_list to_edge_list call
    G = cugraph.Graph(directed=True)
    G.from_cudf_adjlist(offsets, indices, None)
    edgelist = G.view_edge_list()
    sources_cu = edgelist["src"]
    destinations_cu = edgelist["dst"]
    compare_series(sources_cu, sources_exp)
    compare_series(destinations_cu, destinations_exp)
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_view_edge_list_from_adj_list(graph_file):
    """view_edge_list() on an adjacency-built graph matches scipy's COO."""
    Mnx = utils.read_csv_for_nx(graph_file)
    N = max(max(Mnx["0"]), max(Mnx["1"])) + 1
    Mcsr = scipy.sparse.csr_matrix((Mnx.weight, (Mnx["0"], Mnx["1"])), shape=(N, N))
    offsets = cudf.Series(Mcsr.indptr)
    indices = cudf.Series(Mcsr.indices)
    G = cugraph.Graph(directed=True)
    G.from_cudf_adjlist(offsets, indices, None)
    edgelist_df = G.view_edge_list()
    # Expected edges: the COO form of the same matrix.
    Mcoo = Mcsr.tocoo()
    src1 = Mcoo.row
    dst1 = Mcoo.col
    compare_series(src1, edgelist_df["src"])
    compare_series(dst1, edgelist_df["dst"])
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_delete_edge_list_delete_adj_list(graph_file):
    """After deleting a graph's only representation, views must raise."""
    Mnx = utils.read_csv_for_nx(graph_file)
    df = cudf.DataFrame()
    df["src"] = cudf.Series(Mnx["0"])
    df["dst"] = cudf.Series(Mnx["1"])
    N = max(max(Mnx["0"]), max(Mnx["1"])) + 1
    Mcsr = scipy.sparse.csr_matrix((Mnx.weight, (Mnx["0"], Mnx["1"])), shape=(N, N))
    offsets = cudf.Series(Mcsr.indptr)
    indices = cudf.Series(Mcsr.indices)
    # cugraph delete_adj_list delete_edge_list call
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(df, source="src", destination="dst")
    G.delete_edge_list()
    # With the edge list gone, there is nothing to derive an adj list from.
    with pytest.raises(Exception):
        G.view_adj_list()
    G.from_cudf_adjlist(offsets, indices, None)
    G.delete_adj_list()
    with pytest.raises(Exception):
        G.view_edge_list()
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_add_edge_or_adj_list_after_add_edge_or_adj_list(graph_file):
    """A Graph already holding one representation must refuse a second."""
    Mnx = utils.read_csv_for_nx(graph_file)
    df = cudf.DataFrame()
    df["src"] = cudf.Series(Mnx["0"])
    df["dst"] = cudf.Series(Mnx["1"])
    N = max(max(Mnx["0"]), max(Mnx["1"])) + 1
    Mcsr = scipy.sparse.csr_matrix((Mnx.weight, (Mnx["0"], Mnx["1"])), shape=(N, N))
    offsets = cudf.Series(Mcsr.indptr)
    indices = cudf.Series(Mcsr.indices)
    G = cugraph.Graph(directed=True)
    # If cugraph has at least one graph representation, adding a new graph
    # should fail to prevent a single graph object storing two different
    # graphs.
    # If cugraph has a graph edge list, adding a new graph should fail.
    G.from_cudf_edgelist(df, source="src", destination="dst")
    with pytest.raises(Exception):
        G.from_cudf_edgelist(df, source="src", destination="dst")
    with pytest.raises(Exception):
        G.from_cudf_adjlist(offsets, indices, None)
    G.delete_edge_list()
    # If cugraph has a graph adjacency list, adding a new graph should fail.
    G.from_cudf_adjlist(offsets, indices, None)
    with pytest.raises(Exception):
        G.from_cudf_edgelist(df, source="src", destination="dst")
    with pytest.raises(Exception):
        G.from_cudf_adjlist(offsets, indices, None)
    G.delete_adj_list()
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_edges_for_Graph(graph_file):
    """edges() must agree with NetworkX on count and normalized endpoints."""
    cu_M = utils.read_csv_file(graph_file)
    # Create nx Graph
    pdf = cu_M.to_pandas()[["0", "1"]]
    nx_graph = nx.from_pandas_edgelist(
        pdf, source="0", target="1", create_using=nx.Graph
    )
    nx_edges = nx_graph.edges()
    # Create Cugraph Graph from DataFrame
    # Force it to use renumber_from_cudf
    G = cugraph.from_cudf_edgelist(
        cu_M, source=["0"], destination=["1"], create_using=cugraph.Graph
    )
    cu_edge_list = G.edges()
    # Check if number of Edges is same
    assert len(nx_edges) == len(cu_edge_list)
    assert nx_graph.number_of_edges() == G.number_of_edges()
    # Compare nx and cugraph edges when viewing edgelist
    # Normalize each undirected edge to (low, high) endpoint order first.
    edges = []
    for edge in nx_edges:
        if edge[0] > edge[1]:
            edges.append([edge[1], edge[0]])
        else:
            edges.append([edge[0], edge[1]])
    nx_edge_list = cudf.DataFrame(list(edges), columns=["0", "1"])
    assert_frame_equal(
        nx_edge_list.sort_values(by=["0", "1"]).reset_index(drop=True),
        cu_edge_list.sort_values(by=["0", "1"]).reset_index(drop=True),
        check_dtype=False,
    )
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_view_edge_list_for_Graph(graph_file):
    """view_edge_list() must match NetworkX's upper-triangle edge set."""
    cu_M = utils.read_csv_file(graph_file)
    # Create nx Graph
    pdf = cu_M.to_pandas()[["0", "1"]]
    nx_graph = nx.from_pandas_edgelist(
        pdf, source="0", target="1", create_using=nx.Graph
    )
    nx_edges = nx_graph.edges()
    # Create Cugraph Graph from DataFrame
    G = cugraph.from_cudf_edgelist(
        cu_M, source="0", destination="1", create_using=cugraph.Graph
    )
    cu_edge_list = G.view_edge_list().sort_values(["0", "1"])
    # Check if number of Edges is same
    assert len(nx_edges) == len(cu_edge_list)
    assert nx_graph.number_of_edges() == G.number_of_edges()
    # Get edges as upper triangle of matrix
    # (normalize each undirected edge to (low, high) endpoint order).
    edges = []
    for edge in nx_edges:
        if edge[0] > edge[1]:
            edges.append([edge[1], edge[0]])
        else:
            edges.append([edge[0], edge[1]])
    edges = list(edges)
    edges.sort()
    nx_edge_list = cudf.DataFrame(edges, columns=["0", "1"])
    # Compare nx and cugraph edges when viewing edgelist
    # assert cu_edge_list.equals(nx_edge_list)
    assert (cu_edge_list["0"].to_numpy() == nx_edge_list["0"].to_numpy()).all()
    assert (cu_edge_list["1"].to_numpy() == nx_edge_list["1"].to_numpy()).all()
# Test
@pytest.mark.sg
@pytest.mark.filterwarnings("ignore:make_current is deprecated:DeprecationWarning")
@pytest.mark.parametrize("graph_file", utils.DATASETS)
@pytest.mark.skipif(is_single_gpu(), reason="skipping MG testing on Single GPU system")
def test_consolidation(graph_file):
    """Compare a dask_cudf-built cugraph graph against NetworkX.

    Fix: the Dask client and LocalCUDACluster are now closed in a ``finally``
    block so a failing assertion no longer leaks the cluster (and its GPU
    workers) into subsequent tests.
    """
    cluster = LocalCUDACluster()
    client = Client(cluster)
    try:
        chunksize = dcg.get_chunksize(graph_file)

        M = utils.read_csv_for_nx(graph_file)

        df = pd.DataFrame()
        df["source"] = pd.Series(M["0"])
        df["target"] = pd.Series(M["1"])

        ddf = dask_cudf.read_csv(
            graph_file,
            chunksize=chunksize,
            delimiter=" ",
            names=["source", "target", "weight"],
            dtype=["int32", "int32", "float32"],
            header=None,
        )

        Gnx = nx.from_pandas_edgelist(
            df, source="source", target="target", create_using=nx.DiGraph
        )
        G = cugraph.from_cudf_edgelist(
            ddf,
            source="source",
            destination="target",
            create_using=cugraph.Graph(directed=True),
        )

        t1 = time.time()
        assert compare_graphs(Gnx, G)
        t2 = time.time() - t1
        print("compare_graphs time: ", t2)

        Gnx.clear()
        G.clear()
    finally:
        # Always tear down the cluster, even when the comparison fails.
        client.close()
        cluster.close()
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
def test_two_hop_neighbors(graph_file):
    """get_two_hop_neighbors() is validated against a scipy CSR reference."""
    cu_M = utils.read_csv_file(graph_file)
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="2")
    df = G.get_two_hop_neighbors()
    Mnx = utils.read_csv_for_nx(graph_file)
    N = max(max(Mnx["0"]), max(Mnx["1"])) + 1
    Mcsr = scipy.sparse.csr_matrix((Mnx.weight, (Mnx["0"], Mnx["1"])), shape=(N, N))
    # Soundness: every reported pair has an actual two-hop path.
    find_two_paths(df, Mcsr)
    # Completeness: every two-hop pair in the CSR appears in the result.
    check_all_two_hops(df, Mcsr)
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_degree_functionality(graph_file):
    """in_degree/out_degree/degree/degrees must all agree with NetworkX."""
    M = utils.read_csv_for_nx(graph_file)
    cu_M = utils.read_csv_file(graph_file)
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_M, source="0", destination="1", edge_attr="2")
    Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.DiGraph())
    # Collect the three cugraph degree flavors, aligned by vertex id.
    cu_in_degree = G.in_degree().sort_values(by="vertex", ignore_index=True)
    cu_out_degree = G.out_degree().sort_values(by="vertex", ignore_index=True)
    cu_degree = G.degree().sort_values(by="vertex", ignore_index=True)
    cu_results = cu_degree
    cu_results["in_degree"] = cu_in_degree["degree"]
    cu_results["out_degree"] = cu_out_degree["degree"]
    # Reference values from NetworkX, sorted by vertex id as well.
    nx_in_degree = list(Gnx.in_degree())
    nx_out_degree = list(Gnx.out_degree())
    nx_degree = list(Gnx.degree())
    nx_in_degree.sort(key=lambda v: v[0])
    nx_out_degree.sort(key=lambda v: v[0])
    nx_degree.sort(key=lambda v: v[0])
    nx_results = cudf.DataFrame()
    nx_results["vertex"] = dict(nx_degree).keys()
    nx_results["degree"] = dict(nx_degree).values()
    nx_results["in_degree"] = dict(nx_in_degree).values()
    nx_results["out_degree"] = dict(nx_out_degree).values()
    assert_series_equal(
        cu_results["in_degree"],
        nx_results["in_degree"],
        check_names=False,
        check_dtype=False,
    )
    assert_series_equal(
        cu_results["out_degree"],
        nx_results["out_degree"],
        check_names=False,
        check_dtype=False,
    )
    assert_series_equal(
        cu_results["degree"],
        nx_results["degree"],
        check_names=False,
        check_dtype=False,
    )
    # testing degrees functionality
    df = G.degrees().sort_values(by="vertex", ignore_index=True)
    assert_series_equal(
        df["in_degree"],
        nx_results["in_degree"],
        check_names=False,
        check_dtype=False,
    )
    assert_series_equal(
        df["out_degree"],
        nx_results["out_degree"],
        check_names=False,
        check_dtype=False,
    )
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_number_of_vertices(graph_file):
    """number_of_vertices() must equal NetworkX's node count."""
    cu_M = utils.read_csv_file(graph_file)
    M = utils.read_csv_for_nx(graph_file)
    if M is None:
        raise TypeError("Could not read the input graph")
    # cugraph add_edge_list
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_M, source="0", destination="1")
    Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.DiGraph())
    assert G.number_of_vertices() == Gnx.number_of_nodes()
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
def test_to_directed(graph_file):
    """to_directed() must materialize both orientations of each edge.

    Fix: the final loop previously asserted on the undirected source graph
    ``G`` -- which reports both directions for every edge by definition --
    so the converted ``DiG`` was never actually checked.  It now asserts on
    ``DiG``.
    """
    cu_M = utils.read_csv_file(graph_file)
    # Keep only one orientation of each edge in the input.
    cu_M = cu_M[cu_M["0"] <= cu_M["1"]].reset_index(drop=True)
    M = utils.read_csv_for_nx(graph_file)
    M = M[M["0"] <= M["1"]]
    assert len(cu_M) == len(M)
    # cugraph add_edge_list
    G = cugraph.Graph()
    G.from_cudf_edgelist(cu_M, source="0", destination="1")
    Gnx = nx.from_pandas_edgelist(M, source="0", target="1", create_using=nx.Graph())

    DiG = G.to_directed()
    DiGnx = Gnx.to_directed()

    assert DiG.is_directed()
    assert DiG.number_of_nodes() == DiGnx.number_of_nodes()
    assert DiG.number_of_edges() == DiGnx.number_of_edges()
    assert DiG._plc_graph is not None

    for index, row in cu_M.to_pandas().iterrows():
        # The directed conversion must contain both orientations.
        assert DiG.has_edge(row["0"], row["1"])
        assert DiG.has_edge(row["1"], row["0"])
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
def test_to_undirected(graph_file):
    """Converting a directed Graph to undirected mirrors NetworkX."""
    # Read data, then make it one-directional by dropping reverse edges.
    cu_edges = utils.read_csv_file(graph_file)
    cu_edges = cu_edges[cu_edges["0"] <= cu_edges["1"]].reset_index(drop=True)
    nx_edges = utils.read_csv_for_nx(graph_file)
    nx_edges = nx_edges[nx_edges["0"] <= nx_edges["1"]]
    assert len(cu_edges) == len(nx_edges)

    DiG = cugraph.Graph(directed=True)
    DiG.from_cudf_edgelist(cu_edges, source="0", destination="1")
    DiGnx = nx.from_pandas_edgelist(
        nx_edges, source="0", target="1", create_using=nx.DiGraph()
    )

    # Directed: only the stored direction exists.
    for _, edge in cu_edges.to_pandas().iterrows():
        assert DiG.has_edge(edge["0"], edge["1"])
        assert not DiG.has_edge(edge["1"], edge["0"])

    G = DiG.to_undirected()
    Gnx = DiGnx.to_undirected()

    assert not G.is_directed()
    assert G.number_of_nodes() == Gnx.number_of_nodes()
    assert G.number_of_edges() == Gnx.number_of_edges()
    assert G._plc_graph is not None

    # Undirected: both directions are now present.
    for _, edge in cu_edges.to_pandas().iterrows():
        assert G.has_edge(edge["0"], edge["1"])
        assert G.has_edge(edge["1"], edge["0"])
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_has_edge(graph_file):
    """Every edge of an undirected graph is reported in both directions."""
    edges = utils.read_csv_file(graph_file)
    edges = edges[edges["0"] <= edges["1"]].reset_index(drop=True)
    # cugraph add_edge_list
    G = cugraph.Graph()
    G.from_cudf_edgelist(edges, source="0", destination="1")
    for _, edge in edges.to_pandas().iterrows():
        u, v = edge["0"], edge["1"]
        assert G.has_edge(u, v)
        assert G.has_edge(v, u)
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_has_node(graph_file):
    """has_node() is True for every endpoint appearing in the edge list."""
    edges = utils.read_csv_file(graph_file)
    vertex_ids = cudf.concat([edges["0"], edges["1"]]).unique()
    # cugraph add_edge_list
    G = cugraph.Graph()
    G.from_cudf_edgelist(edges, source="0", destination="1")
    assert all(G.has_node(v) for v in vertex_ids.values_host)
@pytest.mark.sg
def test_invalid_has_node():
    """has_node() returns False for ids that are absent from the graph."""
    edge_df = cudf.DataFrame({"src": [1], "dst": [2]})
    G = cugraph.Graph()
    G.from_cudf_edgelist(edge_df, source="src", destination="dst")
    for missing in (-1, 0, G.number_of_nodes() + 1):
        assert not G.has_node(missing)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_bipartite_api(graph_file):
    """Adding one bipartite node set and reading both sets back works.

    This only exercises the set-registration API; the datasets used are
    not truly bipartite.
    """
    edges = utils.read_csv_file(graph_file)
    all_nodes = cudf.concat([edges["0"], edges["1"]]).unique().sort_values()

    # Partition: first half explicitly registered, remainder implicit.
    first_half = cudf.Series(all_nodes[: len(all_nodes) // 2])
    remainder = cudf.Series(
        set(all_nodes.values_host) - set(first_half.values_host)
    )

    G = cugraph.BiPartiteGraph()
    assert G.is_bipartite()

    G.add_nodes_from(first_half, bipartite="set1")
    G.from_cudf_edgelist(edges, source="0", destination="1")

    # sets() must return the registered partition and its complement.
    set1, set2 = G.sets()
    assert set1.equals(first_half)
    assert set2.equals(remainder)
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_neighbors(graph_file):
    """G.neighbors(v) agrees with NetworkX for every vertex."""
    edges = utils.read_csv_file(graph_file)
    vertex_ids = cudf.concat([edges["0"], edges["1"]]).unique()
    nx_input = utils.read_csv_for_nx(graph_file)

    G = cugraph.Graph()
    G.from_cudf_edgelist(edges, source="0", destination="1")
    Gnx = nx.from_pandas_edgelist(
        nx_input, source="0", target="1", create_using=nx.Graph()
    )

    for v in vertex_ids.values_host:
        cu_neighbors = sorted(G.neighbors(v).to_arrow().to_pylist())
        nx_neighbors = sorted(Gnx.neighbors(v))
        assert cu_neighbors == nx_neighbors
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_to_pandas_edgelist(graph_file):
    """to_pandas_edgelist() honors custom source/destination column names.

    Fix: this test was missing the ``sg`` marker that every sibling test in
    this module carries, so marker-filtered runs (``pytest -m sg``) silently
    skipped it.
    """
    cu_M = utils.read_csv_file(graph_file)
    G = cugraph.Graph()
    G.from_cudf_edgelist(cu_M, source="0", destination="1")
    # Positional and keyword call forms must both expose the renamed column.
    assert "0" in G.to_pandas_edgelist("0", "1").columns
    assert "0" in G.to_pandas_edgelist(source="0", destination="1").columns
@pytest.mark.sg
def test_graph_init_with_multigraph():
    """Graph(m_graph=...) accepts cugraph MultiGraphs but not NetworkX ones.

    A NetworkX MultiGraph must raise TypeError; cugraph MultiGraph and
    directed MultiGraph instances must be accepted without error.
    """
    with pytest.raises(TypeError):
        cugraph.Graph(m_graph=nx.MultiGraph())
    edge_df = cudf.DataFrame({"src": [0, 1, 2], "dst": [1, 2, 3]})
    for multigraph in (cugraph.MultiGraph(), cugraph.MultiGraph(directed=True)):
        multigraph.from_cudf_edgelist(edge_df, source="src", destination="dst")
        cugraph.Graph(m_graph=multigraph)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_create_sg_graph(graph_file):
    """cugraph.from_cudf_edgelist builds a usable single-GPU graph.

    For the dolphins dataset, additionally run BFS through pylibcugraph and
    spot-check known distances/predecessors after unrenumbering.
    """
    edgelist = utils.read_csv_file(graph_file)
    G = cugraph.from_cudf_edgelist(
        edgelist, source=["0"], destination=["1"], edge_attr="2"
    )
    # ensure graph exists
    assert G._plc_graph is not None

    seeds = G.lookup_internal_vertex_id(cudf.Series([1], dtype="int32"))
    if graph_file.name == "dolphins.csv":
        bfs_result = pylibcugraph_bfs(
            ResourceHandle(), G._plc_graph, seeds, False, 0, True, False
        )
        result_df = convert_to_cudf(bfs_result)
        result_df = G.unrenumber(result_df, column_name="vertex")
        result_df = G.unrenumber(result_df, column_name="predecessor")
        # Known BFS results for dolphins starting from external vertex 1.
        assert result_df[result_df.vertex == 33].distance.to_numpy()[0] == 3
        assert result_df[result_df.vertex == 33].predecessor.to_numpy()[0] == 37
        assert result_df[result_df.vertex == 11].distance.to_numpy()[0] == 4
        assert result_df[result_df.vertex == 11].predecessor.to_numpy()[0] == 51
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_create_graph_with_edge_ids(graph_file):
    """Edge ids/types are accepted for directed graphs and rejected otherwise."""
    el = utils.read_csv_file(graph_file)
    el["id"] = cupy.random.permutation(len(el))
    el["id"] = el["id"].astype(el["1"].dtype)
    # Fix: randint(1, 5) replaces the long-deprecated random_integers(4);
    # both draw uniformly from the closed range [1, 4].
    el["etype"] = cupy.random.randint(1, 5, size=len(el))
    el["etype"] = el["etype"].astype("int32")

    # Multi-column edge_attr is invalid for an undirected Graph.
    with pytest.raises(ValueError):
        G = cugraph.Graph()
        G.from_cudf_edgelist(
            el,
            source="0",
            destination="1",
            edge_attr=["2", "id", "etype"],
        )

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        el,
        source="0",
        destination="1",
        edge_attr=["2", "id", "etype"],
    )
    assert G.is_directed()

    # edge_ids are not supported for undirected graphs.
    with pytest.raises(ValueError):
        G.to_undirected()
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_create_graph_with_edge_ids_check_renumbering(graph_file):
    """Multi-column renumbering round-trips the original edge list.

    Builds a directed graph keyed on two-column vertices and verifies that
    unrenumbering the internal edge list reproduces the user's input.
    """
    el = utils.read_csv_file(graph_file)
    el = el.rename(columns={"0": "0_src", "1": "0_dst", "2": "weights"})
    # Second vertex column: a shifted copy of the first.
    el["1_src"] = el["0_src"] + 1000
    el["1_dst"] = el["0_dst"] + 1000
    el["edge_id"] = cupy.random.permutation(len(el))
    el["edge_id"] = el["edge_id"].astype(el["1_dst"].dtype)
    # Fix: randint(1, 5) replaces the long-deprecated random_integers(4);
    # both draw uniformly from the closed range [1, 4].
    el["edge_type"] = cupy.random.randint(1, 5, size=len(el))
    el["edge_type"] = el["edge_type"].astype("int32")

    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(
        el,
        source=["0_src", "1_src"],
        destination=["0_dst", "1_dst"],
        edge_attr=["weights", "edge_id", "edge_type"],
    )
    assert G.renumbered is True

    renumbered_df = G.edgelist.edgelist_df
    unrenumbered_df = G.unrenumber(renumbered_df, "src")
    unrenumbered_df = G.unrenumber(unrenumbered_df, "dst")
    assert_frame_equal(
        el.sort_values(by=["0_src", "0_dst"]).reset_index(drop=True),
        unrenumbered_df.sort_values(by=["0_src", "0_dst"]).reset_index(drop=True),
        check_dtype=False,
        check_like=True,
    )
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS)
def test_density(graph_file):
    """Graph.density() agrees with NetworkX; MultiGraph rejects density()."""
    cu_edges = utils.read_csv_file(graph_file)
    nx_edges = utils.read_csv_for_nx(graph_file)
    if nx_edges is None:
        raise TypeError("Could not read the input graph")
    # cugraph add_edge_list
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(cu_edges, source="0", destination="1")
    Gnx = nx.from_pandas_edgelist(
        nx_edges, source="0", target="1", create_using=nx.DiGraph()
    )
    assert G.density() == nx.density(Gnx)

    # density() is not defined for multigraphs.
    with pytest.raises(TypeError):
        cugraph.MultiGraph().density()
# Test
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize("random_state", [42, None])
@pytest.mark.parametrize("num_vertices", [5, None])
def test_select_random_vertices(graph_file, random_state, num_vertices):
    """select_random_vertices returns only vertices present in the graph."""
    edgelist = utils.read_csv_file(graph_file)
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(edgelist, source="0", destination="1", edge_attr="2")

    if num_vertices is None:
        # Select all vertices
        num_vertices = G.number_of_nodes()
    sampled = G.select_random_vertices(random_state, num_vertices)

    sampled_df = cudf.DataFrame()
    original_df = cudf.DataFrame()
    sampled_df["sampled_vertices"] = sampled
    original_df["original_vertices"] = G.nodes()

    # Every sampled vertex must join against an existing vertex.
    joined = sampled_df.merge(
        original_df, left_on="sampled_vertices", right_on="original_vertices"
    )
    assert len(joined) == len(sampled)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize(
    "edge_props",
    [
        ["edge_id", "edge_type", "weight"],
        ["edge_id", "edge_type"],
        ["edge_type", "weight"],
        ["edge_id"],
        ["weight"],
    ],
)
def test_graph_creation_edge_properties(graph_file, edge_props):
    """Graph creation accepts any subset of edge property columns."""
    df = utils.read_csv_file(graph_file)
    df["edge_id"] = cupy.arange(len(df), dtype="int32")
    df["edge_type"] = cupy.int32(3)
    df["weight"] = 0.5

    # Map each requested property keyword to the column of the same name.
    prop_kwargs = dict(zip(edge_props, edge_props))
    G = cugraph.Graph(directed=True)
    G.from_cudf_edgelist(df, source="0", destination="1", **prop_kwargs)
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize("directed", [True, False])
@pytest.mark.parametrize("renumber", [True, False])
def test_graph_creation_edges(graph_file, directed, renumber):
    """view_edge_list()/edges() round-trip the user's input edge list."""
    # Verifies that the input dataframe passed by the user is the same one
    # retrieved from the graph when the graph is directed.
    srcCol = "source"
    dstCol = "target"
    wgtCol = "weight"
    input_df = cudf.read_csv(
        graph_file,
        delimiter=" ",
        names=[srcCol, dstCol, wgtCol],
        dtype=["int32", "int32", "float32"],
        header=None,
    )
    G = cugraph.Graph(directed=directed)
    if renumber:
        # trigger renumbering by passing a list of vertex column
        srcCol = [srcCol]
        dstCol = [dstCol]
        vertexCol = srcCol + dstCol
    else:
        vertexCol = [srcCol, dstCol]
    G.from_cudf_edgelist(input_df, source=srcCol, destination=dstCol, edge_attr=wgtCol)
    # Columns of interest: vertex columns plus the weight column.
    columns = vertexCol.copy()
    columns.append(wgtCol)
    edge_list_view = G.view_edge_list().loc[:, columns]
    edges = G.edges().loc[:, vertexCol]
    # edges() must match view_edge_list() minus the weight column.
    assert_frame_equal(
        edge_list_view.drop(columns=wgtCol)
        .sort_values(by=vertexCol)
        .reset_index(drop=True),
        edges.sort_values(by=vertexCol).reset_index(drop=True),
        check_dtype=False,
    )
    if directed:
        # Directed graphs must return exactly the user's edge list.
        assert_frame_equal(
            edge_list_view.sort_values(by=vertexCol).reset_index(drop=True),
            input_df.sort_values(by=vertexCol).reset_index(drop=True),
            check_dtype=False,
        )
    else:
        # If the graph is undirected, ensures that only the upper triangular
        # matrix of the adjacency matrix is returned
        if isinstance(srcCol, list):
            # Unwrap the single-column lists set up for the renumber case.
            srcCol = srcCol[0]
            dstCol = dstCol[0]
        is_upper_triangular = edge_list_view[srcCol] <= edge_list_view[dstCol]
        # Collapse to the distinct truth values; only True may remain.
        is_upper_triangular = list(set(is_upper_triangular.values_host))
        assert len(is_upper_triangular) == 1
        assert is_upper_triangular[0]
@pytest.mark.sg
@pytest.mark.parametrize("graph_file", utils.DATASETS_SMALL)
@pytest.mark.parametrize("directed", [True, False])
def test_graph_creation_edges_multi_col_vertices(graph_file, directed):
    """Same round-trip check as test_graph_creation_edges, but with
    two-column (multi-column) vertex identifiers."""
    srcCol = ["src_0", "src_1"]
    dstCol = ["dst_0", "dst_1"]
    wgtCol = "weight"
    vertexCol = srcCol + dstCol
    columns = vertexCol.copy()
    columns.append(wgtCol)
    input_df = cudf.read_csv(
        graph_file,
        delimiter=" ",
        names=[srcCol[0], dstCol[0], wgtCol],
        dtype=["int32", "int32", "float32"],
        header=None,
    )
    # Second vertex column derived from the first by a constant shift.
    input_df["src_1"] = input_df["src_0"] + 1000
    input_df["dst_1"] = input_df["dst_0"] + 1000
    G = cugraph.Graph(directed=directed)
    G.from_cudf_edgelist(input_df, source=srcCol, destination=dstCol, edge_attr=wgtCol)
    # Reorder input columns to match the view's column ordering.
    input_df = input_df.loc[:, columns]
    edge_list_view = G.view_edge_list().loc[:, columns]
    edges = G.edges().loc[:, vertexCol]
    # edges() must match view_edge_list() minus the weight column.
    assert_frame_equal(
        edge_list_view.drop(columns=wgtCol)
        .sort_values(by=vertexCol)
        .reset_index(drop=True),
        edges.sort_values(by=vertexCol).reset_index(drop=True),
        check_dtype=False,
    )
    if directed:
        # Directed graphs must return exactly the user's edge list.
        assert_frame_equal(
            edge_list_view.sort_values(by=vertexCol).reset_index(drop=True),
            input_df.sort_values(by=vertexCol).reset_index(drop=True),
            check_dtype=False,
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tests/structure/test_hypergraph.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2015, Graphistry, Inc.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Graphistry, Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL Graphistry, Inc BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime as dt
import pandas as pd
import pytest
import cudf
from cudf.testing.testing import assert_frame_equal
import cugraph
# Small mixed-type fixture (ints, strings, unicode/emoji column name and
# values) shared by most hypergraph tests below.
simple_df = cudf.DataFrame.from_pandas(
    pd.DataFrame(
        {
            "id": ["a", "b", "c"],
            "a1": [1, 2, 3],
            "a2": ["red", "blue", "green"],
            "🙈": ["æski ēˈmōjē", "😋", "s"],
        }
    )
)
# Fixture whose columns share values across columns, used by the
# direct-hyperedge tests.
hyper_df = cudf.DataFrame.from_pandas(
    pd.DataFrame({"aa": [0, 1, 2], "bb": ["a", "b", "c"], "cc": ["b", "0", "1"]})
)
@pytest.mark.sg
def test_complex_df():
    """hypergraph() must accept a frame mixing many dtypes (smoke test)."""
    complex_df = pd.DataFrame(
        {
            "src": [0, 1, 2, 3],
            "dst": [1, 2, 3, 0],
            "colors": [1, 1, 2, 2],
            "bool": [True, False, True, True],
            "char": ["a", "b", "c", "d"],
            "str": ["a", "b", "c", "d"],
            "ustr": ["a", "b", "c", "d"],
            "emoji": ["😋", "😋😋", "😋", "😋"],
            "int": [0, 1, 2, 3],
            "num": [0.5, 1.5, 2.5, 3.5],
            "date_str": [
                "2018-01-01 00:00:00",
                "2018-01-02 00:00:00",
                "2018-01-03 00:00:00",
                "2018-01-05 00:00:00",
            ],
            "date": [dt.datetime(2018, 1, 1)] * 4,
            "time": [pd.Timestamp("2018-01-05")] * 4,
        }
    )
    # Add a categorical twin of every column that supports it.
    for col in list(complex_df.columns):
        try:
            complex_df[col + "_cat"] = complex_df[col].astype("category")
        except Exception:
            # lists aren't categorical
            pass
    cugraph.hypergraph(cudf.DataFrame.from_pandas(complex_df))
@pytest.mark.sg
@pytest.mark.parametrize("categorical_metadata", [False, True])
def test_hyperedges(categorical_metadata):
    """Default (non-direct) hypergraph produces one hyperedge per
    (row, column) pair: 3 rows x 4 columns = 12 edges."""
    h = cugraph.hypergraph(simple_df, categorical_metadata=categorical_metadata)
    # The result bundle always exposes exactly these five keys.
    assert len(h.keys()) == len(["entities", "nodes", "edges", "events", "graph"])
    # Expected edge table: every row-event links to each column attribute.
    edges = cudf.from_pandas(
        pd.DataFrame(
            {
                "event_id": [
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                ],
                "edge_type": [
                    "a1",
                    "a1",
                    "a1",
                    "a2",
                    "a2",
                    "a2",
                    "id",
                    "id",
                    "id",
                    "🙈",
                    "🙈",
                    "🙈",
                ],
                "attrib_id": [
                    "a1::1",
                    "a1::2",
                    "a1::3",
                    "a2::red",
                    "a2::blue",
                    "a2::green",
                    "id::a",
                    "id::b",
                    "id::c",
                    "🙈::æski ēˈmōjē",
                    "🙈::😋",
                    "🙈::s",
                ],
                "id": ["a", "b", "c"] * 4,
                "a1": [1, 2, 3] * 4,
                "a2": ["red", "blue", "green"] * 4,
                "🙈": ["æski ēˈmōjē", "😋", "s"] * 4,
            }
        )
    )
    if categorical_metadata:
        # edge_type is emitted as a categorical when metadata is categorical.
        edges = edges.astype({"edge_type": "category"})
    assert_frame_equal(edges, h["edges"], check_dtype=False)
    # Expected sizes of each component of the result bundle.
    for (k, v) in [("entities", 12), ("nodes", 15), ("edges", 12), ("events", 3)]:
        assert len(h[k]) == v
@pytest.mark.sg
def test_hyperedges_direct():
    """direct=True wires attribute nodes to each other (no event nodes)."""
    result = cugraph.hypergraph(hyper_df, direct=True)
    assert len(result["edges"]) == 9
    assert len(result["nodes"]) == 9
@pytest.mark.sg
def test_hyperedges_direct_categories():
    """Columns sharing one category collapse their equal values into
    shared nodes, shrinking the node count."""
    result = cugraph.hypergraph(
        hyper_df,
        direct=True,
        categories={col: "N" for col in ("aa", "bb", "cc")},
    )
    assert len(result["edges"]) == 9
    assert len(result["nodes"]) == 6
@pytest.mark.sg
def test_hyperedges_direct_manual_shaping():
    """The EDGES mapping controls exactly which column pairs get edges."""
    narrow = cugraph.hypergraph(
        hyper_df,
        direct=True,
        EDGES={"aa": ["cc"], "cc": ["cc"]},
    )
    assert len(narrow["edges"]) == 6

    wide = cugraph.hypergraph(
        hyper_df,
        direct=True,
        EDGES={"aa": ["cc", "bb", "aa"], "cc": ["cc"]},
    )
    assert len(wide["edges"]) == 12
@pytest.mark.sg
@pytest.mark.parametrize("categorical_metadata", [False, True])
def test_drop_edge_attrs(categorical_metadata):
    """drop_edge_attrs=True omits the source columns from the edge table,
    leaving only event/type/attribute identifiers."""
    h = cugraph.hypergraph(
        simple_df,
        columns=["id", "a1", "🙈"],
        drop_edge_attrs=True,
        categorical_metadata=categorical_metadata,
    )
    # The result bundle always exposes exactly these five keys.
    assert len(h.keys()) == len(["entities", "nodes", "edges", "events", "graph"])
    # Expected edges: 3 rows x 3 selected columns = 9, identifiers only.
    edges = cudf.DataFrame.from_pandas(
        pd.DataFrame(
            {
                "event_id": [
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                ],
                "edge_type": ["a1", "a1", "a1", "id", "id", "id", "🙈", "🙈", "🙈"],
                "attrib_id": [
                    "a1::1",
                    "a1::2",
                    "a1::3",
                    "id::a",
                    "id::b",
                    "id::c",
                    "🙈::æski ēˈmōjē",
                    "🙈::😋",
                    "🙈::s",
                ],
            }
        )
    )
    if categorical_metadata:
        # edge_type is emitted as a categorical when metadata is categorical.
        edges = edges.astype({"edge_type": "category"})
    assert_frame_equal(edges, h["edges"], check_dtype=False)
    # Expected sizes of each component of the result bundle.
    for (k, v) in [("entities", 9), ("nodes", 12), ("edges", 9), ("events", 3)]:
        assert len(h[k]) == v
@pytest.mark.sg
@pytest.mark.parametrize("categorical_metadata", [False, True])
def test_drop_edge_attrs_direct(categorical_metadata):
    """direct=True + drop_edge_attrs yields attribute-to-attribute edges
    (src/dst) with no event nodes and no copied source columns."""
    h = cugraph.hypergraph(
        simple_df,
        ["id", "a1", "🙈"],
        direct=True,
        drop_edge_attrs=True,
        EDGES={"id": ["a1"], "a1": ["🙈"]},
        categorical_metadata=categorical_metadata,
    )
    # The result bundle always exposes exactly these five keys.
    assert len(h.keys()) == len(["entities", "nodes", "edges", "events", "graph"])
    # Expected edges: 3 rows x 2 EDGES pairs = 6 direct edges.
    edges = cudf.DataFrame.from_pandas(
        pd.DataFrame(
            {
                "event_id": [
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                    "event_id::0",
                    "event_id::1",
                    "event_id::2",
                ],
                "edge_type": ["a1::🙈", "a1::🙈", "a1::🙈", "id::a1", "id::a1", "id::a1"],
                "src": ["a1::1", "a1::2", "a1::3", "id::a", "id::b", "id::c"],
                "dst": ["🙈::æski ēˈmōjē", "🙈::😋", "🙈::s", "a1::1", "a1::2", "a1::3"],
            }
        )
    )
    if categorical_metadata:
        # edge_type is emitted as a categorical when metadata is categorical.
        edges = edges.astype({"edge_type": "category"})
    assert_frame_equal(edges, h["edges"], check_dtype=False)
    # Direct mode produces no event entries.
    for (k, v) in [("entities", 9), ("nodes", 9), ("edges", 6), ("events", 0)]:
        assert len(h[k]) == v
@pytest.mark.sg
def test_skip_hyper():
    """SKIP-ed columns contribute no nodes/edges (nulls kept)."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "b"], "b": ["a", "b", "c"], "c": [1, 2, 3]})
    )
    graph = cugraph.hypergraph(frame, SKIP=["c"], dropna=False)["graph"]
    assert len(graph.nodes()) == 9
    assert len(graph.edges()) == 6
@pytest.mark.sg
def test_skip_drop_na_hyper():
    """SKIP combined with dropna=True also removes the null row's entries."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "b"], "b": ["a", "b", "c"], "c": [1, 2, 3]})
    )
    graph = cugraph.hypergraph(frame, SKIP=["c"], dropna=True)["graph"]
    assert len(graph.nodes()) == 8
    assert len(graph.edges()) == 5
@pytest.mark.sg
def test_skip_direct():
    """SKIP with direct=True keeps nulls when dropna=False."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "b"], "b": ["a", "b", "c"], "c": [1, 2, 3]})
    )
    graph = cugraph.hypergraph(frame, SKIP=["c"], dropna=False, direct=True)["graph"]
    assert len(graph.nodes()) == 6
    assert len(graph.edges()) == 3
@pytest.mark.sg
def test_skip_drop_na_direct():
    """SKIP with direct=True and dropna=True drops the null row entirely."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "b"], "b": ["a", "b", "c"], "c": [1, 2, 3]})
    )
    graph = cugraph.hypergraph(frame, SKIP=["c"], dropna=True, direct=True)["graph"]
    assert len(graph.nodes()) == 4
    assert len(graph.edges()) == 2
@pytest.mark.sg
def test_drop_na_hyper():
    """dropna=True removes null cells from the hyper representation."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "c"], "i": [1, 2, None]})
    )
    graph = cugraph.hypergraph(frame, dropna=True)["graph"]
    assert len(graph.nodes()) == 7
    assert len(graph.edges()) == 4
@pytest.mark.sg
def test_drop_na_direct():
    """dropna=True with direct=True keeps only fully-populated rows."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"a": ["a", None, "a"], "i": [1, 1, None]})
    )
    graph = cugraph.hypergraph(frame, dropna=True, direct=True)["graph"]
    assert len(graph.nodes()) == 2
    assert len(graph.edges()) == 1
@pytest.mark.sg
def test_skip_na_hyperedge():
    """Null cells produce no hyperedges, with or without edge attrs."""
    frame_with_nan = cudf.DataFrame.from_pandas(
        pd.DataFrame({"x": ["a", "b", "c"], "y": ["aa", None, "cc"]})
    )
    # Five non-null cells -> five expected hyperedges.
    expected_hits = ["a", "b", "c", "aa", "cc"]

    dropped_attr_edges = cugraph.hypergraph(frame_with_nan, drop_edge_attrs=True)["edges"]
    assert len(dropped_attr_edges) == len(expected_hits)

    default_edges = cugraph.hypergraph(frame_with_nan)["edges"]
    assert len(default_edges) == len(expected_hits)
@pytest.mark.sg
def test_hyper_to_pa_vanilla():
    """Hypergraph node and edge frames convert cleanly to Arrow."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"x": ["a", "b", "c"], "y": ["d", "e", "f"]})
    )
    hg = cugraph.hypergraph(frame)
    assert len(hg["graph"].nodes().to_arrow()) == 9
    assert len(hg["graph"].edges().to_arrow()) == 6
@pytest.mark.sg
def test_hyper_to_pa_mixed():
    """Arrow conversion works when columns mix strings and integers."""
    frame = cudf.DataFrame.from_pandas(
        pd.DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]})
    )
    hg = cugraph.hypergraph(frame)
    assert len(hg["graph"].nodes().to_arrow()) == 9
    assert len(hg["graph"].edges().to_arrow()) == 6
@pytest.mark.sg
def test_hyper_to_pa_na():
    """Null-bearing frames keep their rows (dropna=False) through Arrow.

    Fix: removed a leftover debugging ``print`` of the node frame.
    """
    df = cudf.DataFrame.from_pandas(
        pd.DataFrame({"x": ["a", None, "c"], "y": [1, 2, None]})
    )
    hg = cugraph.hypergraph(df, dropna=False)
    # Node/edge counts must survive the to_arrow() conversion unchanged.
    nodes_arr = hg["graph"].nodes().to_arrow()
    assert len(hg["graph"].nodes()) == 9
    assert len(nodes_arr) == 9
    edges_err = hg["graph"].edges().to_arrow()
    assert len(hg["graph"].edges()) == 6
    assert len(edges_err) == 6
@pytest.mark.sg
def test_hyper_to_pa_all():
    """All-column hypergraph over simple_df converts to Arrow intact."""
    hg = cugraph.hypergraph(simple_df, ["id", "a1", "🙈"])
    node_frame = hg["graph"].nodes()
    assert len(node_frame) == 12
    assert len(node_frame.to_arrow()) == 12
    edge_frame = hg["graph"].edges()
    assert len(edge_frame) == 9
    assert len(edge_frame.to_arrow()) == 9
@pytest.mark.sg
def test_hyper_to_pa_all_direct():
    """Direct hypergraph over simple_df converts to Arrow intact."""
    hg = cugraph.hypergraph(simple_df, ["id", "a1", "🙈"], direct=True)
    node_frame = hg["graph"].nodes()
    assert len(node_frame) == 9
    assert len(node_frame.to_arrow()) == 9
    edge_frame = hg["graph"].edges()
    assert len(edge_frame) == 9
    assert len(edge_frame.to_arrow()) == 9
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .feature_storage.feat_storage import FeatureStore
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/feature_storage/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/feature_storage/feat_storage.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections import defaultdict
from typing import Sequence, Union
import cudf
import cupy as cp
import numpy as np
import pandas as pd
from cugraph.utilities.utils import import_optional, MissingModule
torch = import_optional("torch")
wgth = import_optional("pylibwholegraph.torch")
class FeatureStore:
    """The feature-store class used to store feature data for GNNs"""
    def __init__(
        self,
        backend: str = "numpy",
        wg_comm: object = None,
        wg_type: str = None,
        wg_location: str = None,
    ):
        """
        Constructs a new FeatureStore object
        Parameters:
        ----------
        backend: str ('numpy', 'torch', 'wholegraph')
            Optional (default='numpy')
            The name of the backend to use.
        wg_comm: WholeMemoryCommunicator
            Optional (default=automatic)
            Only used with the 'wholegraph' backend.
            The communicator to use to store features in WholeGraph.
        wg_type: str ('distributed', 'continuous', 'chunked')
            Optional (default='distributed')
            Only used with the 'wholegraph' backend.
            The memory format (distributed, continuous, or chunked) of
            this FeatureStore. For more information see the WholeGraph
            documentation.
        wg_location: str ('cpu', 'cuda')
            Optional (default='cuda')
            Only used with the 'wholegraph' backend.
            Where the data is stored (cpu or cuda).
            Defaults to storing on the GPU (cuda).
        """
        # Nested mapping: feat_name -> {type_name: stored feature object}.
        self.fd = defaultdict(dict)
        if backend not in ["numpy", "torch", "wholegraph"]:
            raise ValueError(
                f"backend {backend} not supported. "
                "Supported backends are numpy, torch, wholegraph"
            )
        self.backend = backend
        # WholeGraph-only settings; remain None for numpy/torch backends.
        self.__wg_comm = None
        self.__wg_type = None
        self.__wg_location = None
        if backend == "wholegraph":
            self.__wg_comm = (
                wg_comm if wg_comm is not None else wgth.get_local_node_communicator()
            )
            self.__wg_type = wg_type if wg_type is not None else "distributed"
            self.__wg_location = wg_location if wg_location is not None else "cuda"
            # Validate the resolved settings, including user-supplied values.
            if self.__wg_type not in ["distributed", "chunked", "continuous"]:
                raise ValueError(f"invalid memory format {self.__wg_type}")
            if (self.__wg_location != "cuda") and (self.__wg_location != "cpu"):
                raise ValueError(f"invalid location {self.__wg_location}")
    def add_data(
        self, feat_obj: Sequence, type_name: str, feat_name: str, **kwargs
    ) -> None:
        """
        Add the feature data to the feature_storage class
        Parameters:
        ----------
        feat_obj : array_like object
            The feature object to save in feature store
        type_name : str
            The node-type/edge-type of the feature
        feat_name: str
            The name of the feature being stored
        Returns:
        -------
        None
        """
        # Convert the incoming object to this store's backend representation
        # before storing it.
        self.fd[feat_name][type_name] = self._cast_feat_obj_to_backend(
            feat_obj,
            self.backend,
            wg_comm=self.__wg_comm,
            wg_type=self.__wg_type,
            wg_location=self.__wg_location,
            **kwargs,
        )
    def add_data_no_cast(self, feat_obj, type_name: str, feat_name: str) -> None:
        """
        Direct add the feature data to the feature_storage class with no cast
        Parameters:
        ----------
        feat_obj : array_like object
            The feature object to save in feature store
        type_name : str
            The node-type/edge-type of the feature
        feat_name: str
            The name of the feature being stored
        Returns:
        -------
        None
        """
        # Caller guarantees feat_obj already matches the backend format.
        self.fd[feat_name][type_name] = feat_obj
    def get_data(
        self,
        indices: Union[np.ndarray, torch.Tensor],
        type_name: str,
        feat_name: str,
    ) -> Union[np.ndarray, torch.Tensor]:
        """
        Retrieve the feature data corresponding to the indices, type and feature name
        Parameters:
        -----------
        indices: np.ndarray or torch.Tensor
            The indices of the values to extract.
        type_name : str
            The node-type/edge-type to store data
        feat_name:
            The feature name to retrieve data for
        Returns:
        --------
        np.ndarray or torch.Tensor
            Array object of the backend type
        """
        if feat_name not in self.fd:
            raise ValueError(
                f"{feat_name} not found in features: {list(self.fd.keys())}"
            )
        if type_name not in self.fd[feat_name]:
            raise ValueError(
                f"type_name {type_name} not found in"
                f" feature: {list(self.fd[feat_name].keys())}"
            )
        feat = self.fd[feat_name][type_name]
        # WholeGraph embeddings are gathered via their own API and require
        # the indices to live on the GPU as a torch tensor.
        if not isinstance(wgth, MissingModule) and isinstance(
            feat, wgth.WholeMemoryEmbedding
        ):
            indices_tensor = (
                indices
                if isinstance(indices, torch.Tensor)
                else torch.as_tensor(indices, device="cuda")
            )
            return feat.gather(indices_tensor)
        else:
            # numpy/torch backends support plain fancy indexing.
            return feat[indices]
    def get_feature_list(self) -> dict:
        """Return a mapping of feature name -> the type names stored for it.

        NOTE(review): despite the method name and the previous ``list[str]``
        annotation, this has always returned a dict of dict-keys views;
        the annotation has been corrected to ``dict``.
        """
        return {feat_name: feats.keys() for feat_name, feats in self.fd.items()}
    @staticmethod
    def _cast_feat_obj_to_backend(feat_obj, backend: str, **kwargs):
        # Dispatch on the backend name; dataframes are first flattened to
        # their underlying array via .values.
        # NOTE(review): an unrecognized backend falls through returning None;
        # __init__ validation makes that unreachable in normal use.
        if backend == "numpy":
            if isinstance(feat_obj, (cudf.DataFrame, pd.DataFrame)):
                return _cast_to_numpy_ar(feat_obj.values, **kwargs)
            else:
                return _cast_to_numpy_ar(feat_obj, **kwargs)
        elif backend == "torch":
            if isinstance(feat_obj, (cudf.DataFrame, pd.DataFrame)):
                return _cast_to_torch_tensor(feat_obj.values, **kwargs)
            else:
                return _cast_to_torch_tensor(feat_obj, **kwargs)
        elif backend == "wholegraph":
            return _get_wg_embedding(feat_obj, **kwargs)
def _get_wg_embedding(feat_obj, wg_comm=None, wg_type=None, wg_location=None, **kwargs):
    """Materialize ``feat_obj`` as a new WholeGraph embedding.

    Each rank copies only its local shard of the source tensor into the
    embedding, then all ranks synchronize on the communicator barrier.
    """
    comm = wg_comm or wgth.get_local_node_communicator()
    mem_format = wg_type or "distributed"
    device = wg_location or "cuda"

    if isinstance(feat_obj, (cudf.DataFrame, pd.DataFrame)):
        source = _cast_to_torch_tensor(feat_obj.values)
    else:
        source = _cast_to_torch_tensor(feat_obj)

    embedding = wgth.create_embedding(
        comm,
        mem_format,
        device,
        source.dtype,
        source.shape,
    )
    # Fill only this rank's shard of the embedding tensor.
    local_shard, shard_offset = embedding.get_embedding_tensor().get_local_tensor()
    local_shard.copy_(source[shard_offset : shard_offset + local_shard.shape[0]])
    comm.barrier()
    return embedding
def _cast_to_torch_tensor(ar, **kwargs):
    """Coerce an array-like object to a torch.Tensor (zero-copy where possible)."""
    if isinstance(ar, cp.ndarray):
        return torch.as_tensor(ar, device="cuda")
    if isinstance(ar, np.ndarray):
        return torch.from_numpy(ar)
    return torch.as_tensor(ar)
def _cast_to_numpy_ar(ar, **kwargs):
    """Coerce an array-like object to a host-resident numpy.ndarray."""
    if isinstance(ar, cp.ndarray):
        # CuPy: explicit device-to-host copy.
        return ar.get()
    if type(ar).__name__ == "Tensor":
        # Torch tensor, detected by name to avoid a hard torch dependency.
        return ar.numpy()
    return np.asarray(ar)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/data_loading/bulk_sampler_io.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cudf
import cupy
from math import ceil
from pandas import isna
from typing import Union, Optional, List
def create_df_from_disjoint_series(series_list: List[cudf.Series]) -> cudf.DataFrame:
    """
    Combine a list of disjoint named series into a single DataFrame.

    Columns are inserted longest-first so the frame is sized by the longest
    series (presumably so cudf pads the shorter columns with nulls on
    alignment — behavior inherited from the original implementation).

    Parameters
    ----------
    series_list: List[cudf.Series]
        The named series to combine; each becomes one column, keyed by
        its ``name`` attribute.

    Returns
    -------
    cudf.DataFrame
        DataFrame with one column per input series.
    """
    # Iterate over a sorted copy: the original implementation called
    # series_list.sort(...) which mutated the caller's list in place,
    # leaking the reordering as a side effect.
    for s in sorted(series_list, key=len, reverse=True):
        if s is series_list[0] and "df" not in dir():
            pass
    df = cudf.DataFrame()
    for s in sorted(series_list, key=len, reverse=True):
        df[s.name] = s
    return df
def _write_samples_to_parquet_csr(
    results: cudf.DataFrame,
    offsets: cudf.DataFrame,
    renumber_map: cudf.DataFrame,
    batches_per_partition: int,
    output_path: str,
    partition_info: Optional[Union[dict, str]] = None,
) -> cudf.Series:
    """
    Writes CSR/CSC compressed samples to parquet.

    Batches that are empty are discarded, and the remaining non-empty
    batches are renumbered to be contiguous starting from the first
    batch id. This means that the output batch ids may not match
    the input batch ids.

    Parameters
    ----------
    results: cudf.DataFrame
        The results dataframe containing the sampled minibatches.
    offsets: cudf.DataFrame
        The offsets dataframe indicating the start/end of each minibatch
        in the results dataframe.
    renumber_map: cudf.DataFrame
        The renumber map containing the mapping of renumbered vertex ids
        to original vertex ids.
    batches_per_partition: int
        The maximum number of minibatches allowed per written parquet partition.
    output_path: str
        The output path (where parquet files should be written to).
    partition_info: Union[dict, str]
        Either a dictionary containing partition data from dask, the string 'sg'
        indicating that this is a single GPU write, or None indicating that this
        function should perform a no-op (required by dask).

    Returns
    -------
    An empty cudf series (dask requires a return value from each partition).
    """
    # Required by dask; need to skip dummy partitions.
    if partition_info is None or len(results) == 0:
        return cudf.Series(dtype="int64")
    if partition_info != "sg" and (not isinstance(partition_info, dict)):
        raise ValueError("Invalid value of partition_info")
    # Additional check to skip dummy partitions required for CSR format.
    if isna(offsets.batch_id.iloc[0]):
        return cudf.Series(dtype="int64")
    # Output columns written per partition:
    # major_offsets - CSR/CSC row/col pointers
    # minors - CSR/CSC col/row indices
    # edge id - edge ids (same shape as minors)
    # edge type - edge types (same shape as minors)
    # weight - edge weight (same shape as minors)
    # renumber map - the original vertex ids
    # renumber map offsets - start/end of the map for each batch
    #                        (only 1 per batch b/c of framework
    #                         stipulations making this legal)
    # label-hop offsets - indicate the start/end of each hop
    #                     for each batch
    batch_ids = offsets.batch_id
    label_hop_offsets = offsets.offsets
    renumber_map_offsets = offsets.renumber_map_offsets
    del offsets
    # The three series above have different true lengths; each was padded
    # with NA to the frame's length, so strip the padding independently.
    batch_ids.dropna(inplace=True)
    label_hop_offsets.dropna(inplace=True)
    renumber_map_offsets.dropna(inplace=True)
    # Detach each column from `results` (dropping it to free memory as we
    # go), strip NA padding, and keep the raw device array.
    major_offsets_array = results.major_offsets
    results.drop(columns="major_offsets", inplace=True)
    major_offsets_array.dropna(inplace=True)
    major_offsets_array = major_offsets_array.values
    minors_array = results.minors
    results.drop(columns="minors", inplace=True)
    minors_array.dropna(inplace=True)
    minors_array = minors_array.values
    weight_array = results.weight
    results.drop(columns="weight", inplace=True)
    weight_array.dropna(inplace=True)
    weight_array = (
        cupy.array([], dtype="float32") if weight_array.empty else weight_array.values
    )
    edge_id_array = results.edge_id
    results.drop(columns="edge_id", inplace=True)
    edge_id_array.dropna(inplace=True)
    edge_id_array = (
        cupy.array([], dtype="int64") if edge_id_array.empty else edge_id_array.values
    )
    edge_type_array = results.edge_type
    results.drop(columns="edge_type", inplace=True)
    edge_type_array.dropna(inplace=True)
    edge_type_array = (
        cupy.array([], dtype="int32")
        if edge_type_array.empty
        else edge_type_array.values
    )
    del results
    # Every batch must contribute the same number of hop offsets; the
    # per-batch count is the fanout length.
    offsets_length = len(label_hop_offsets) - 1
    if offsets_length % len(batch_ids) != 0:
        raise ValueError("Invalid hop offsets")
    fanout_length = int(offsets_length / len(batch_ids))
    # Write up to batches_per_partition batches into each parquet file.
    for p in range(0, int(ceil(len(batch_ids) / batches_per_partition))):
        partition_start = p * (batches_per_partition)
        partition_end = (p + 1) * (batches_per_partition)
        # +1 keeps the trailing end-offset for this partition's slice.
        label_hop_offsets_current_partition = label_hop_offsets.iloc[
            partition_start * fanout_length : partition_end * fanout_length + 1
        ].reset_index(drop=True)
        label_hop_offsets_current_partition.name = "label_hop_offsets"
        batch_ids_current_partition = batch_ids.iloc[partition_start:partition_end]
        (
            major_offsets_start,
            major_offsets_end,
        ) = label_hop_offsets_current_partition.iloc[
            [0, -1]
        ].values  # legal since offsets has the 1 extra offset
        results_start, results_end = major_offsets_array[
            [major_offsets_start, major_offsets_end]
        ]  # avoid d2h copy
        # no need to use end batch id, just ensure the batch is labeled correctly
        start_batch_id = batch_ids_current_partition.iloc[0]
        # end_batch_id = batch_ids_current_partition.iloc[-1]
        # create the renumber map offsets
        renumber_map_offsets_current_partition = renumber_map_offsets.iloc[
            partition_start : partition_end + 1
        ].reset_index(drop=True)
        renumber_map_offsets_current_partition.name = "renumber_map_offsets"
        (
            renumber_map_start,
            renumber_map_end,
        ) = renumber_map_offsets_current_partition.iloc[
            [0, -1]
        ].values  # avoid d2h copy
        # Assemble all output columns (of differing lengths) into one frame.
        results_current_partition = create_df_from_disjoint_series(
            [
                cudf.Series(minors_array[results_start:results_end], name="minors"),
                cudf.Series(
                    renumber_map.map.values[renumber_map_start:renumber_map_end],
                    name="map",
                ),
                label_hop_offsets_current_partition,
                cudf.Series(
                    major_offsets_array[major_offsets_start : major_offsets_end + 1],
                    name="major_offsets",
                ),
                cudf.Series(weight_array[results_start:results_end], name="weight"),
                cudf.Series(edge_id_array[results_start:results_end], name="edge_id"),
                cudf.Series(
                    edge_type_array[results_start:results_end], name="edge_type"
                ),
                renumber_map_offsets_current_partition,
            ]
        )
        # Batch ids are assumed contiguous within the partition, so the end
        # id is derived from the count rather than read from the series.
        end_batch_id = start_batch_id + len(batch_ids_current_partition) - 1
        filename = f"batch={start_batch_id}-{end_batch_id}.parquet"
        full_output_path = os.path.join(output_path, filename)
        results_current_partition.to_parquet(
            full_output_path, compression=None, index=False, force_nullable_schema=True
        )
    return cudf.Series(dtype="int64")
def _write_samples_to_parquet_coo(
    results: cudf.DataFrame,
    offsets: cudf.DataFrame,
    renumber_map: cudf.DataFrame,
    batches_per_partition: int,
    output_path: str,
    partition_info: Optional[Union[dict, str]] = None,
) -> cudf.Series:
    """
    Writes COO compressed samples to parquet.

    Batches that are empty are discarded, and the remaining non-empty
    batches are renumbered to be contiguous starting from the first
    batch id. This means that the output batch ids may not match
    the input batch ids.

    Parameters
    ----------
    results: cudf.DataFrame
        The results dataframe containing the sampled minibatches.
    offsets: cudf.DataFrame
        The offsets dataframe indicating the start/end of each minibatch
        in the results dataframe.
    renumber_map: cudf.DataFrame
        The renumber map containing the mapping of renumbered vertex ids
        to original vertex ids (may be None when renumbering is disabled).
    batches_per_partition: int
        The maximum number of minibatches allowed per written parquet partition.
    output_path: str
        The output path (where parquet files should be written to).
    partition_info: Union[dict, str]
        Either a dictionary containing partition data from dask, the string 'sg'
        indicating that this is a single GPU write, or None indicating that this
        function should perform a no-op (required by dask).

    Returns
    -------
    An empty cudf series (dask requires a return value from each partition).
    """
    # Required by dask; need to skip dummy partitions.
    if partition_info is None or len(results) == 0:
        return cudf.Series(dtype="int64")
    if partition_info != "sg" and (not isinstance(partition_info, dict)):
        raise ValueError("Invalid value of partition_info")
    # The final offsets row only marks the end of the data; its value is
    # recovered via len(results) for the last partition below, so drop it.
    offsets = offsets[:-1]
    # Offsets is always in order, so the last batch id is always the highest
    max_batch_id = offsets.batch_id.iloc[-1]
    results.dropna(axis=1, how="all", inplace=True)
    results["hop_id"] = results["hop_id"].astype("uint8")
    # Write up to batches_per_partition batches into each parquet file.
    for p in range(0, len(offsets), batches_per_partition):
        offsets_p = offsets.iloc[p : p + batches_per_partition]
        start_batch_id = offsets_p.batch_id.iloc[0]
        end_batch_id = offsets_p.batch_id.iloc[len(offsets_p) - 1]
        reached_end = end_batch_id == max_batch_id
        # Row range of `results` covered by this partition's batches; the end
        # is the start offset of the *next* batch (or the end of the data).
        start_ix = offsets_p.offsets.iloc[0]
        if reached_end:
            end_ix = len(results)
        else:
            offsets_z = offsets[offsets.batch_id == (end_batch_id + 1)]
            end_ix = offsets_z.offsets.iloc[0]
        results_p = results.iloc[start_ix:end_ix].reset_index(drop=True)
        if end_batch_id - start_batch_id + 1 > len(offsets_p):
            # This occurs when some batches returned 0 samples.
            # To properly account this, the remaining batches are
            # renumbered to have contiguous batch ids and the empty
            # samples are dropped.
            offsets_p.drop("batch_id", axis=1, inplace=True)
            batch_id_range = cudf.Series(
                cupy.arange(start_batch_id, start_batch_id + len(offsets_p))
            )
            end_batch_id = start_batch_id + len(offsets_p) - 1
        else:
            batch_id_range = offsets_p.batch_id
        # Expand the per-batch ids to one id per sampled edge row.
        results_p["batch_id"] = batch_id_range.repeat(
            cupy.diff(offsets_p.offsets.values, append=end_ix)
        ).values
        if renumber_map is not None:
            # Slice this partition's portion of the renumber map, again using
            # the next batch's offset (or the map's end) as the upper bound.
            renumber_map_start_ix = offsets_p.renumber_map_offsets.iloc[0]
            if reached_end:
                renumber_map_end_ix = len(renumber_map)
            else:
                renumber_map_end_ix = offsets_z.renumber_map_offsets.iloc[0]
            renumber_map_p = renumber_map.map.iloc[
                renumber_map_start_ix:renumber_map_end_ix
            ]
            # Add the length so no na-checking is required in the loading stage
            map_offset = (
                end_batch_id - start_batch_id + 2
            ) - offsets_p.renumber_map_offsets.iloc[0]
            renumber_map_o = cudf.concat(
                [
                    offsets_p.renumber_map_offsets + map_offset,
                    cudf.Series(
                        [len(renumber_map_p) + len(offsets_p) + 1], dtype="int32"
                    ),
                ]
            )
            # Sanity check: one offset per batch plus the trailing end offset.
            renumber_offset_len = len(renumber_map_o)
            if renumber_offset_len != end_batch_id - start_batch_id + 2:
                raise ValueError("Invalid batch id or renumber map")
            # Serialized "map" column = offsets block followed by map values.
            final_map_series = cudf.concat(
                [
                    renumber_map_o,
                    renumber_map_p,
                ],
                ignore_index=True,
            )
            if len(final_map_series) > len(results_p):
                # this should rarely happen and only occurs on small graphs/samples
                # TODO remove the sort_index to improve performance on small graphs
                final_map_series.name = "map"
                results_p = results_p.join(final_map_series, how="outer").sort_index()
            else:
                results_p["map"] = final_map_series
        full_output_path = os.path.join(
            output_path, f"batch={start_batch_id}-{end_batch_id}.parquet"
        )
        results_p.to_parquet(
            full_output_path, compression=None, index=False, force_nullable_schema=True
        )
    return cudf.Series(dtype="int64")
def write_samples(
    results: cudf.DataFrame,
    offsets: cudf.DataFrame,
    renumber_map: cudf.DataFrame,
    batches_per_partition: int,
    output_path: str,
):
    """
    Writes the samples to parquet.

    Batches in each partition that are empty are discarded, and the
    remaining non-empty batches are renumbered to be contiguous starting
    from the first batch id in the partition.
    This means that the output batch ids may not match the input batch ids.

    Parameters
    ----------
    results: cudf.DataFrame
        The results dataframe containing the sampled minibatches.
    offsets: cudf.DataFrame
        The offsets dataframe indicating the start/end of each minibatch
        in the results dataframe.
    renumber_map: cudf.DataFrame
        The renumber map containing the mapping of renumbered vertex ids
        to original vertex ids.
    batches_per_partition: int
        The maximum number of minibatches allowed per written parquet partition.
        (Fixed: this parameter was previously mis-annotated as cudf.DataFrame.)
    output_path: str
        The output path (where parquet files should be written to).
    """
    # Select the writer based on which column layout `results` uses.
    if ("majors" in results.columns) and ("minors" in results.columns):
        write_fn = _write_samples_to_parquet_coo
    # TODO these names will be deprecated in release 23.12
    elif ("sources" in results.columns) and ("destinations" in results.columns):
        write_fn = _write_samples_to_parquet_coo
    elif "major_offsets" in results.columns and "minors" in results.columns:
        write_fn = _write_samples_to_parquet_csr
    else:
        raise ValueError("invalid columns")
    # dask_cudf frames expose `compute`; write each partition on its worker.
    if hasattr(results, "compute"):
        results.map_partitions(
            write_fn,
            offsets,
            renumber_map,
            batches_per_partition,
            output_path,
            align_dataframes=False,
            meta=cudf.Series(dtype="int64"),
        ).compute()
    else:
        write_fn(
            results,
            offsets,
            renumber_map,
            batches_per_partition,
            output_path,
            partition_info="sg",
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/data_loading/__init__.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.gnn.data_loading.bulk_sampler import EXPERIMENTAL__BulkSampler
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/data_loading/bulk_sampler.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
import cudf
import dask_cudf
from dask.distributed import wait
from dask.distributed import futures_of
import cugraph
import pylibcugraph
from cugraph.gnn.data_loading.bulk_sampler_io import write_samples
import warnings
import logging
import time
class EXPERIMENTAL__BulkSampler:
    """
    Performs sampling based on input seeds grouped into batches by
    a batch id. Writes the output minibatches to parquet, with
    partition sizes specified by the user. Allows controlling the
    number of input seeds per sampling function call. Supports
    basic logging.

    Batches in each partition that are empty are discarded, and the
    remaining non-empty batches are renumbered to be contiguous starting
    from the first batch id in the partition.
    This means that the output batch ids may not match the input batch ids.
    See GitHub issue #3794 for more details.
    """

    # Internal column names used for the seed vertices and batch ids after
    # add_batches() renames the caller's columns.
    start_col_name = "_START_"
    batch_col_name = "_BATCH_"

    def __init__(
        self,
        batch_size: int,
        output_path: str,
        graph,
        seeds_per_call: int = 200_000,
        batches_per_partition: int = 100,
        renumber: bool = False,
        log_level: int = None,
        **kwargs,
    ):
        """
        Constructs a new BulkSampler

        Parameters
        ----------
        batch_size: int
            The size of each batch.
        output_path: str
            The directory where results will be stored.
        graph: cugraph.Graph
            The cugraph graph to operate upon.
        seeds_per_call: int (optional, default=200,000)
            The number of seeds (start vertices) that can be processed by
            a single sampling call.
        batches_per_partition: int (optional, default=100)
            The number of batches outputted to a single parquet partition.
        renumber: bool (optional, default=False)
            Whether to renumber vertices. Currently only supported for
            homogeneous graphs.
        log_level: int (optional, default=None)
            Whether to enable logging for this sampler. Supports 3 levels
            of logging if enabled (INFO, WARNING, ERROR). If not provided,
            defaults to WARNING.
        kwargs: kwargs
            Keyword arguments to be passed to the sampler (i.e. fanout).
        """
        self.__logger = logging.getLogger(__name__)
        self.__logger.setLevel(log_level or logging.WARNING)
        # A single partition cannot hold more batches than one sampling call
        # can produce; cap batches_per_partition accordingly.
        max_batches_per_partition = seeds_per_call // batch_size
        if batches_per_partition > max_batches_per_partition:
            warnings.warn(
                f"batches_per_partition ({batches_per_partition}) is >"
                f" seeds_per_call / batch size ({max_batches_per_partition})"
                "; automatically setting batches_per_partition to "
                # FIX: this segment was missing its f-prefix, so the literal
                # text "{max_batches_per_partition}" was emitted.
                f"{max_batches_per_partition}"
            )
            batches_per_partition = max_batches_per_partition
        self.__batch_size = batch_size
        self.__output_path = output_path
        self.__graph = graph
        self.__seeds_per_call = seeds_per_call
        self.__batches_per_partition = batches_per_partition
        self.__renumber = renumber
        self.__batches = None
        self.__sample_call_args = kwargs

    @property
    def seeds_per_call(self) -> int:
        """Maximum number of seeds processed by a single sampling call."""
        return self.__seeds_per_call

    @property
    def batch_size(self) -> int:
        """Number of seeds per batch."""
        return self.__batch_size

    @property
    def batches_per_partition(self) -> int:
        """Maximum number of batches written to one parquet partition."""
        return self.__batches_per_partition

    @property
    def renumber(self) -> bool:
        """Whether sampled output is renumbered."""
        return self.__renumber

    @property
    def size(self) -> int:
        """Number of seeds currently buffered and not yet flushed."""
        if self.__batches is None:
            return 0
        else:
            return len(self.__batches)

    def add_batches(
        self,
        # Annotation kept as a string so it is evaluated lazily.
        df: "Union[cudf.DataFrame, dask_cudf.DataFrame]",
        start_col_name: str,
        batch_col_name: str,
    ) -> None:
        """
        Adds batches to this BulkSampler.

        Parameters
        ----------
        df: cudf.DataFrame or dask_cudf.DataFrame
            Contains columns for vertex ids, batch id
        start_col_name: str
            Name of the column containing the start vertices
        batch_col_name: str
            Name of the column containing the batch ids

        Returns
        -------
        None

        Examples
        --------
        >>> import cudf
        >>> from cugraph.experimental.gnn import BulkSampler
        >>> from cugraph.datasets import karate
        >>> import tempfile
        >>> df = cudf.DataFrame({
        ...     "start_vid": [0, 4, 2, 3, 9, 11],
        ...     "start_batch": cudf.Series(
        ...         [0, 0, 0, 1, 1, 1], dtype="int32")})
        >>> output_tempdir = tempfile.TemporaryDirectory()
        >>> bulk_sampler = BulkSampler(
        ...     batch_size=3,
        ...     output_path=output_tempdir.name,
        ...     graph=karate.get_graph(download=True))
        >>> bulk_sampler.add_batches(
        ...     df,
        ...     start_col_name="start_vid",
        ...     batch_col_name="start_batch")
        """
        # Normalize the caller's column names to the internal names.
        df = df[[start_col_name, batch_col_name]].rename(
            columns={
                start_col_name: self.start_col_name,
                batch_col_name: self.batch_col_name,
            }
        )
        if self.__batches is None:
            self.__batches = df
        else:
            # All added frames must be of the same flavor (cudf vs dask_cudf).
            if isinstance(df, type(self.__batches)):
                if isinstance(df, dask_cudf.DataFrame):
                    concat_fn = dask_cudf.concat
                else:
                    concat_fn = cudf.concat
                self.__batches = concat_fn([self.__batches, df])
            else:
                raise TypeError(
                    "Provided batches must match the dataframe"
                    " type of previous batches!"
                )
        # Eagerly flush once enough seeds are buffered for a sampling call.
        if self.size >= self.seeds_per_call:
            self.__logger.info(
                f"Number of input seeds ({self.size})"
                f" is >= seeds per call ({self.seeds_per_call})."
                " Calling flush() to compute and write minibatches."
            )
            self.flush()

    def flush(self) -> None:
        """
        Computes all uncomputed batches
        """
        if self.size == 0:
            return
        start_time_calc_batches = time.perf_counter()
        if isinstance(self.__batches, dask_cudf.DataFrame):
            self.__batches = self.__batches.persist()
        min_batch_id = self.__batches[self.batch_col_name].min()
        if isinstance(self.__batches, dask_cudf.DataFrame):
            min_batch_id = min_batch_id.persist()
        else:
            min_batch_id = int(min_batch_id)
        # Sample only as many batches as fit in one call's partitions;
        # the remainder is handled by the recursive flush() at the end.
        partition_size = self.batches_per_partition * self.batch_size
        partitions_per_call = (
            self.seeds_per_call + partition_size - 1
        ) // partition_size
        npartitions = partitions_per_call
        max_batch_id = min_batch_id + npartitions * self.batches_per_partition - 1
        if isinstance(self.__batches, dask_cudf.DataFrame):
            max_batch_id = max_batch_id.persist()
        batch_id_filter = self.__batches[self.batch_col_name] <= max_batch_id
        if isinstance(batch_id_filter, dask_cudf.Series):
            batch_id_filter = batch_id_filter.persist()
        end_time_calc_batches = time.perf_counter()
        self.__logger.info(
            f"Calculated batches to sample; min = {min_batch_id}"
            f" and max = {max_batch_id};"
            f" took {end_time_calc_batches - start_time_calc_batches:.4f} s"
        )
        # Choose the SG or MG sampling function based on the graph type.
        if isinstance(self.__graph._plc_graph, pylibcugraph.graphs.SGGraph):
            sample_fn = cugraph.uniform_neighbor_sample
        else:
            sample_fn = cugraph.dask.uniform_neighbor_sample
        self.__sample_call_args.update(
            {
                "_multiple_clients": True,
                "keep_batches_together": True,
                "min_batch_id": min_batch_id,
                "max_batch_id": max_batch_id,
            }
        )
        start_time_sample_call = time.perf_counter()
        # Call uniform neighbor sample
        output = sample_fn(
            self.__graph,
            **self.__sample_call_args,
            start_list=self.__batches[[self.start_col_name, self.batch_col_name]][
                batch_id_filter
            ],
            with_batch_ids=True,
            with_edge_properties=True,
            return_offsets=True,
            renumber=self.__renumber,
            # use_legacy_names=False,
        )
        if self.__renumber:
            samples, offsets, renumber_map = output
        else:
            samples, offsets = output
            renumber_map = None
        end_time_sample_call = time.perf_counter()
        sample_runtime = end_time_sample_call - start_time_sample_call
        self.__logger.info(
            f"Called uniform neighbor sample, took {sample_runtime:.4f} s"
        )
        # Filter batches to remove those already processed
        self.__batches = self.__batches[~batch_id_filter]
        del batch_id_filter
        if isinstance(self.__batches, dask_cudf.DataFrame):
            self.__batches = self.__batches.persist()
        start_time_write = time.perf_counter()
        # Write batches to parquet
        self.__write(samples, offsets, renumber_map)
        # Release distributed results explicitly before recursing.
        if isinstance(self.__batches, dask_cudf.DataFrame):
            futures = [f.release() for f in futures_of(samples)] + [
                f.release() for f in futures_of(offsets)
            ]
            if renumber_map is not None:
                futures += [f.release() for f in futures_of(renumber_map)]
            wait(futures)
        del samples
        del offsets
        if renumber_map is not None:
            del renumber_map
        end_time_write = time.perf_counter()
        write_runtime = end_time_write - start_time_write
        self.__logger.info(f"Wrote samples to parquet, took {write_runtime} seconds")
        current_size = self.size
        if current_size > 0:
            self.__logger.info(
                f"There are still {current_size} samples remaining, "
                "calling flush() again..."
            )
            self.flush()

    def __write(
        self,
        samples: "Union[cudf.DataFrame, dask_cudf.DataFrame]",
        offsets: "Union[cudf.DataFrame, dask_cudf.DataFrame]",
        renumber_map: "Union[cudf.DataFrame, dask_cudf.DataFrame]",
    ) -> None:
        # Ensure the output directory exists, then delegate to the io layer.
        os.makedirs(self.__output_path, exist_ok=True)
        write_samples(
            samples,
            offsets,
            renumber_map,
            self.__batches_per_partition,
            self.__output_path,
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions/dgl_uniform_sampler.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import cached_property
import cugraph
import cudf
import cupy as cp
from cugraph.gnn.dgl_extensions.utils.sampling import (
sample_cugraph_graphs,
get_subgraph_and_src_range_from_edgelist,
get_underlying_dtype_from_sg,
)
from cugraph.gnn.dgl_extensions.utils.sampling import src_n, dst_n
class DGLUniformSampler:
    """
    Performs DGL-style uniform neighborhood sampling on cugraph subgraphs,
    supporting both single-GPU and multi-GPU (dask) execution and both
    homogeneous and heterogeneous (multi-edge-type) graphs.
    """

    def __init__(self, edge_list_dict, etype_range_dict, etype_id_dict, single_gpu):
        # edge_list_dict: canonical etype -> edgelist dataframe; a single
        # entry indicates a homogeneous graph (see has_multiple_etypes).
        self.edge_list_dict = edge_list_dict
        self.etype_id_dict = etype_id_dict
        # Re-key the edge-id ranges by numeric etype id so sampled edge
        # indices can be mapped back to their edge type
        # (see _get_type_id_from_indices).
        self.etype_id_range_dict = {
            self.etype_id_dict[etype]: r for etype, r in etype_range_dict.items()
        }
        self.single_gpu = single_gpu

    def sample_neighbors(
        self, nodes_ar, fanout=-1, edge_dir="in", prob=None, replace=False
    ):
        """
        Sample neighboring edges of the given nodes and return the subgraph.

        Parameters
        ----------
        nodes_ar : cupy array of node ids or dict with key of node type
         and value of node ids to sample neighbors from.
        fanout : int
            The number of edges to be sampled for each node on each edge type.
            If -1 is given all the neighboring edges for each node on
            each edge type will be selected.
        edge_dir : str {"in" or "out"}
            Determines whether to sample inbound or outbound edges.
            Can take either in for inbound edges or out for outbound edges.
        prob : str
            Feature name used as the (unnormalized) probabilities associated
            with each neighboring edge of a node. Each feature must be a
            scalar. The features must be non-negative floats, and the sum of
            the features of inbound/outbound edges for every node must be
            positive (though they don't have to sum up to one). Otherwise,
            the result will be undefined. If not specified, sample uniformly.
        replace : bool
            If True, sample with replacement.

        Returns
        -------
        [src, dst, eids] or {etype1:[src, dst, eids],...,}
        """
        if prob is not None:
            raise NotImplementedError(
                "prob is not currently supported",
                " for sample_neighbors in CuGraphStorage",
            )
        if edge_dir not in ["in", "out"]:
            raise ValueError(
                f"edge_dir must be either 'in' or 'out' got {edge_dir} instead"
            )
        # "in" sampling uses reversed subgraphs so the seeds appear as
        # sources in the underlying (directed) sampling call.
        if self.has_multiple_etypes:
            # TODO: Convert into a single call when
            # https://github.com/rapidsai/cugraph/issues/2696 lands
            if edge_dir == "in":
                sgs_obj, sgs_src_range_obj = self.extracted_reverse_subgraphs_per_type
            else:
                sgs_obj, sgs_src_range_obj = self.extracted_subgraphs_per_type
            first_sg = list(sgs_obj.values())[0]
        else:
            if edge_dir == "in":
                sgs_obj, sgs_src_range_obj = self.extracted_reverse_subgraph
            else:
                sgs_obj, sgs_src_range_obj = self.extracted_subgraph
            first_sg = sgs_obj
        # Uniform sampling fails when the dtype
        # of the seed dtype is not same as the node dtype
        self.set_sg_node_dtype(first_sg)
        if self.single_gpu:
            sample_f = cugraph.uniform_neighbor_sample
        else:
            sample_f = cugraph.dask.uniform_neighbor_sample
        sampled_df = sample_cugraph_graphs(
            sample_f=sample_f,
            has_multiple_etypes=self.has_multiple_etypes,
            sgs_obj=sgs_obj,
            sgs_src_range_obj=sgs_src_range_obj,
            sg_node_dtype=self._sg_node_dtype,
            nodes_ar=nodes_ar,
            replace=replace,
            fanout=fanout,
            edge_dir=edge_dir,
        )
        if self.has_multiple_etypes:
            # Heterogeneous graph case
            # Add type information
            return self._get_edgeid_type_d(sampled_df)
        else:
            return (
                sampled_df[src_n].values,
                sampled_df[dst_n].values,
                sampled_df["indices"].values,
            )

    def _get_edgeid_type_d(self, df):
        """
        Split a sampled edge frame into per-edge-type (src, dst, eid)
        triples, using each edge id's position in etype_id_range_dict.
        """
        df["type"] = self._get_type_id_from_indices(
            df["indices"], self.etype_id_range_dict
        )
        result_d = {
            etype: df[df["type"] == etype_id]
            for etype, etype_id in self.etype_id_dict.items()
        }
        return {
            etype: (df[src_n].values, df[dst_n].values, df["indices"].values)
            for etype, df in result_d.items()
        }

    @staticmethod
    def _get_type_id_from_indices(indices, etype_id_range_dict):
        """
        Map each edge id in `indices` to its numeric edge-type id based on
        the half-open [start, stop) id range assigned to each type.
        Ids falling in no range are left as -1.
        """
        type_ser = cudf.Series(
            cp.full(shape=len(indices), fill_value=-1, dtype=cp.int32)
        )
        for etype_id, (start, stop) in etype_id_range_dict.items():
            range_types = (start <= indices) & (indices < stop)
            type_ser[range_types] = etype_id
        return type_ser

    # The four extraction properties below are cached so each subgraph is
    # constructed at most once per sampler instance.
    @cached_property
    def extracted_subgraph(self):
        # Homogeneous-graph case only (single edgelist).
        assert len(self.edge_list_dict) == 1
        edge_list = list(self.edge_list_dict.values())[0]
        return get_subgraph_and_src_range_from_edgelist(
            edge_list,
            is_mg=not (self.single_gpu),
            reverse_edges=False,
        )

    @cached_property
    def extracted_reverse_subgraph(self):
        # Homogeneous-graph case only; edges reversed for "in" sampling.
        assert len(self.edge_list_dict) == 1
        edge_list = list(self.edge_list_dict.values())[0]
        return get_subgraph_and_src_range_from_edgelist(
            edge_list, is_mg=not (self.single_gpu), reverse_edges=True
        )

    @cached_property
    def extracted_subgraphs_per_type(self):
        # Heterogeneous case: one subgraph (and source range) per etype.
        sg_d = {}
        sg_src_range_d = {}
        for etype, edge_list in self.edge_list_dict.items():
            (
                sg_d[etype],
                sg_src_range_d[etype],
            ) = get_subgraph_and_src_range_from_edgelist(
                edge_list, is_mg=not (self.single_gpu), reverse_edges=False
            )
        return sg_d, sg_src_range_d

    @cached_property
    def extracted_reverse_subgraphs_per_type(self):
        # Heterogeneous case with reversed edges, for "in" sampling.
        sg_d = {}
        sg_src_range_d = {}
        for etype, edge_list in self.edge_list_dict.items():
            (
                sg_d[etype],
                sg_src_range_d[etype],
            ) = get_subgraph_and_src_range_from_edgelist(
                edge_list, is_mg=not (self.single_gpu), reverse_edges=True
            )
        return sg_d, sg_src_range_d

    @cached_property
    def has_multiple_etypes(self):
        """True when this sampler manages a heterogeneous graph."""
        return len(self.edge_list_dict) > 1

    @cached_property
    def etypes(self):
        """List of canonical edge types managed by this sampler."""
        return list(self.edge_list_dict.keys())

    def set_sg_node_dtype(self, sg):
        """
        Cache and return the node id dtype of the given subgraph; seeds must
        be cast to this dtype before sampling (see sample_neighbors).
        """
        if hasattr(self, "_sg_node_dtype"):
            return self._sg_node_dtype
        else:
            self._sg_node_dtype = get_underlying_dtype_from_sg(sg)
            return self._sg_node_dtype
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions/feature_storage.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def _get_backend_lib_ar(ar):
return type(ar).__module__
def _is_vector_feature(col):
return type(col.dtype).__name__ == "ListDtype"
def _convert_ar_to_numpy(ar):
if isinstance(ar, list):
ar = np.asarray(ar)
else:
lib_name = _get_backend_lib_ar(ar)
if lib_name == "torch":
ar = ar.cpu().numpy()
elif lib_name == "cupy":
ar = ar.get()
elif lib_name == "cudf":
ar = ar.values.get()
elif lib_name == "numpy":
ar = ar
else:
raise NotImplementedError(
f"{lib_name=} not supported yet for conversion to numpy"
)
return ar
class CuFeatureStorage:
    """
    Storage for node/edge feature data.

    Wraps a (possibly remote) property graph and exposes a DGL-style
    ``fetch`` method that returns features as tensors of the configured
    backend library.
    """

    def __init__(
        self,
        pg,
        column,
        storage_type,
        backend_lib="torch",
        indices_offset=0,
        types_to_fetch=None,
    ):
        # pg: the (remote or local) property graph backing this storage.
        # column: the single property column this storage serves.
        self.pg = pg
        self.column = column
        if backend_lib == "torch":
            from torch.utils.dlpack import from_dlpack
        elif backend_lib == "tf":
            from tensorflow.experimental.dlpack import from_dlpack
        elif backend_lib == "cupy":
            from cupy import from_dlpack
        elif backend_lib == "numpy":
            # numpy results never go through DLPack (fetch() returns the
            # numpy array directly), so a placeholder is sufficient.
            # FIX: the previous `pass` left `from_dlpack` unbound, and the
            # assignment below raised NameError for the numpy backend.
            from_dlpack = None
        else:
            raise NotImplementedError(
                f"Only PyTorch ('torch'), TensorFlow ('tf'), and CuPy ('cupy')"
                f"and numpy ('numpy') backends are currently supported, "
                f" got {backend_lib=}"
            )
        if storage_type not in ["edge", "node"]:
            raise NotImplementedError("Only edge and node storage is supported")
        self.storage_type = storage_type
        self.from_dlpack = from_dlpack
        # Offset added to incoming ids before querying the property graph.
        self.indices_offset = indices_offset
        self.types_to_fetch = types_to_fetch

    def fetch(self, indices, device=None, pin_memory=False, **kwargs):
        """Fetch the features of the given node/edge IDs to the
        given device.

        Parameters
        ----------
        indices : Tensor
            Node or edge IDs.
        device : Device
            Device context.
        pin_memory : bool
            Accepted for API compatibility; not used by this implementation.

        Returns
        -------
        Tensor
            Feature data stored in PyTorch Tensor.
        """
        # Default implementation uses synchronous fetch.
        # Handle remote case
        if type(self.pg).__name__ in ["RemotePropertyGraph", "RemoteMGPropertyGraph"]:
            indices = _convert_ar_to_numpy(indices)
            indices = indices + self.indices_offset
            # TODO: Raise Issue
            # We dont support numpy arrays in get_vertex_data, get_edge_data
            # for Remote Graphs
            indices = indices.tolist()
        else:
            # For local case
            # we rely on cupy to handle various inputs cleanly like GPU Tensor,
            # cupy array, cudf Series, cpu tensor etc
            import cupy as cp

            indices = cp.asarray(indices)
            indices = indices + self.indices_offset
        if self.storage_type == "node":
            result = self.pg.get_vertex_data(
                vertex_ids=indices, columns=[self.column], types=self.types_to_fetch
            )
        else:
            result = self.pg.get_edge_data(
                edge_ids=indices, columns=[self.column], types=self.types_to_fetch
            )
        if type(result).__name__ == "DataFrame":
            # Vector (list-dtype) features are expanded to a 2D array first.
            if _is_vector_feature(result[self.column]):
                if self.storage_type == "node":
                    result = self.pg.vertex_vector_property_to_array(
                        result, self.column
                    )
                else:
                    result = self.pg.edge_vector_property_to_array(result, self.column)
                # Collapse a trailing singleton dimension to a 1D feature.
                if result.ndim == 2 and result.shape[1] == 1:
                    result = result.squeeze(1)
            else:
                result = result[self.column].values
                if hasattr(result, "compute"):
                    result = result.compute()
            if len(result) == 0:
                raise ValueError(f"{indices=} not found in FeatureStorage")
            # Hand the device array to the backend via DLPack.
            cap = result.toDlpack()
        else:
            # When backend is not dataframe(pandas, cuDF) we return lists
            result = result[self.column]
            cap = _convert_ar_to_numpy(result)
        if type(cap).__name__ == "PyCapsule":
            tensor = self.from_dlpack(cap)
            del cap
        else:
            # numpy path: already a host array, no DLPack conversion needed.
            tensor = cap
        if device:
            if type(tensor).__module__ == "torch":
                # Can only transfer to different device for pytorch
                tensor = tensor.to(device)
        return tensor
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions/utils/sampling.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Utils for sampling on graphstore like objects
import cugraph
import cudf
import cupy as cp
import dask_cudf
# Canonical internal column names shared by the graphstore sampling utilities.
src_n = "_SRC_"  # edge source-vertex column
dst_n = "_DST_"  # edge destination-vertex column
eid_n = "_EDGE_ID_"  # edge-id column
type_n = "_TYPE_"  # type column (used by callers of this module)
vid_n = "_VERTEX_"  # vertex-id column (used by callers of this module)
def get_subgraph_and_src_range_from_edgelist(edge_list, is_mg, reverse_edges=False):
    """
    Build a directed MultiGraph from ``edge_list`` and return it together
    with the (min, max) range of source vertex ids.

    Parameters
    ----------
    edge_list : cudf.DataFrame or dask_cudf.DataFrame
        Edgelist containing the _SRC_, _DST_ and _EDGE_ID_ columns.
    is_mg : bool
        True when running multi-GPU (``edge_list`` is a dask_cudf.DataFrame).
    reverse_edges : bool, default False
        Swap source and destination columns before building the graph.

    Returns
    -------
    (cugraph.MultiGraph, tuple)
        The constructed subgraph and the (min, max) source-id range.
    """
    if reverse_edges:
        edge_list = edge_list.rename(columns={src_n: dst_n, dst_n: src_n})

    graph = cugraph.MultiGraph(directed=True)
    if is_mg:
        # FIXME: renumber must remain True for MNMG algos; switch to False
        # once https://github.com/rapidsai/cugraph/issues/2437 lands
        renumber = True
        src_range = edge_list[src_n].min().compute(), edge_list[src_n].max().compute()
        build_edgelist = graph.from_dask_cudf_edgelist
    else:
        # NOTE: renumber must stay False so that seed nodes not present in
        # the subgraph are handled correctly
        renumber = False
        src_range = edge_list[src_n].min(), edge_list[src_n].max()
        build_edgelist = graph.from_cudf_edgelist
    build_edgelist(
        edge_list,
        source=src_n,
        destination=dst_n,
        edge_attr=eid_n,
        renumber=renumber,
    )
    # Drop the cached input dataframe (when present) to free memory.
    if hasattr(graph, "input_df"):
        graph.input_df = None
    del edge_list
    return graph, src_range
def sample_multiple_sgs(
    sgs,
    sgs_src_range_obj,
    sample_f,
    start_list_d,
    start_list_dtype,
    edge_dir,
    fanout,
    with_replacement,
):
    """
    Sample every per-etype subgraph whose seed-side vertex type appears in
    ``start_list_d`` and concatenate the results.

    Returns an empty int32 DataFrame with sources/destinations/indices
    columns when no subgraph matched.
    """
    seed_types = list(start_list_d.keys())
    sampled_frames = []
    for can_etype, sg in sgs.items():
        seed_range = sgs_src_range_obj[can_etype]
        # TODO: Remove once the legacy cugraph stores are removed
        if isinstance(can_etype, str):
            can_etype = _convert_can_etype_s_to_tup(can_etype)
        if not _edge_types_contains_canonical_etype(can_etype, seed_types, edge_dir):
            continue
        # Seeds live on the destination side for "in" sampling, source side
        # otherwise.
        seed_type = can_etype[2] if edge_dir == "in" else can_etype[0]
        sampled_frames.append(
            sample_single_sg(
                sg,
                sample_f,
                start_list_d[seed_type],
                start_list_dtype,
                seed_range,
                fanout,
                with_replacement,
            )
        )
    if not sampled_frames:
        empty_df = cudf.DataFrame({"sources": [], "destinations": [], "indices": []})
        return empty_df.astype(cp.int32)
    return cudf.concat(sampled_frames, ignore_index=True)
def sample_single_sg(
    sg,
    sample_f,
    start_list,
    start_list_dtype,
    start_list_range,
    fanout,
    with_replacement,
):
    """
    Run ``sample_f`` on a single subgraph for the given seeds.

    Parameters
    ----------
    sg : cugraph Graph
        The subgraph to sample.
    sample_f : callable
        Sampling function (e.g. uniform_neighbor_sample) called with the
        subgraph, seeds, fanout and replacement flag.
    start_list : Series or dict of Series
        Seed vertices; a dict of per-type series is concatenated.
    start_list_dtype : dtype
        Vertex-id dtype the seeds are cast to.
    start_list_range : tuple
        (min, max) valid vertex-id range; out-of-range seeds are dropped.
    fanout : int
        Number of neighbors to sample per seed.
    with_replacement : bool
        Whether to sample with replacement.
    """
    if isinstance(start_list, dict):
        start_list = cudf.concat(list(start_list.values()))

    # Uniform sampling fails when the dtype
    # of the seed dtype is not same as the node dtype
    start_list = start_list.astype(start_list_dtype)

    # Filter start list by ranges
    # to ensure the seed is within index values
    # see below:
    # https://github.com/rapidsai/cugraph/blob/branch-22.12/cpp/src/prims/per_v_random_select_transform_outgoing_e.cuh
    start_list = start_list[
        (start_list >= start_list_range[0]) & (start_list <= start_list_range[1])
    ]
    if len(start_list) == 0:
        # Cast to int32 so the empty result's dtypes match the empty result
        # produced by sample_multiple_sgs
        empty_df = cudf.DataFrame({"sources": [], "destinations": [], "indices": []})
        return empty_df.astype(cp.int32)
    sampled_df = sample_f(
        sg,
        start_list=start_list,
        fanout_vals=[fanout],
        with_replacement=with_replacement,
    )
    if isinstance(sampled_df, dask_cudf.DataFrame):
        sampled_df = sampled_df.compute()
    return sampled_df
def _edge_types_contains_canonical_etype(can_etype, edge_types, edge_dir):
src_type, _, dst_type = can_etype
if edge_dir == "in":
return dst_type in edge_types
else:
return src_type in edge_types
def _convert_can_etype_s_to_tup(canonical_etype_s):
src_type, etype, dst_type = canonical_etype_s.split(",")
src_type = src_type[2:-1]
dst_type = dst_type[2:-2]
etype = etype[2:-1]
return (src_type, etype, dst_type)
def create_cp_result_ls(d):
    """
    Flatten a {etype: sampled_df} dict into a list of cupy arrays in
    [src, dst, eid] order per etype; empty frames contribute three empty
    int32 arrays.
    """
    flattened = []
    for sampled_df in d.values():
        if len(sampled_df) == 0:
            for _ in range(3):
                flattened.append(cp.empty(shape=0, dtype=cp.int32))
        else:
            flattened.append(sampled_df[src_n].values)
            flattened.append(sampled_df[dst_n].values)
            flattened.append(sampled_df[eid_n].values)
    return flattened
def get_underlying_dtype_from_sg(sg):
    """
    Return the vertex-id dtype of the subgraph's edgelist.

    Raises
    ------
    ValueError
        If neither a "src" nor a "_SRC_" column is present.
    """
    # FIXME: Remove after we have consistent naming
    # https://github.com/rapidsai/cugraph/issues/2618
    edgelist_df = sg.edgelist.edgelist_df
    if "src" in edgelist_df.columns:
        # single-node graphs use "src"
        return edgelist_df["src"].dtype
    if src_n in edgelist_df.columns:
        # multi-node graphs use "_SRC_"
        return edgelist_df[src_n].dtype
    raise ValueError(f"Source column {src_n} not found in the subgraph")
def sample_cugraph_graphs(
    sample_f,
    has_multiple_etypes,
    sgs_obj,
    sgs_src_range_obj,
    sg_node_dtype,
    nodes_ar,
    replace,
    fanout,
    edge_dir,
):
    """
    Sample the given subgraph(s) for the given seed nodes and return a
    cudf.DataFrame with _SRC_/_DST_/indices columns, orienting the result
    according to ``edge_dir``.
    """
    if isinstance(nodes_ar, dict):
        seeds = {
            ntype: create_cudf_series_from_node_ar(arr)
            for ntype, arr in nodes_ar.items()
        }
    else:
        seeds = create_cudf_series_from_node_ar(nodes_ar)

    if has_multiple_etypes:
        # TODO: Convert into a single call when
        # https://github.com/rapidsai/cugraph/issues/2696 lands
        # Uniform sampling fails when the dtype
        # of the seed dtype is not same as the node dtype
        sampled_df = sample_multiple_sgs(
            sgs=sgs_obj,
            sgs_src_range_obj=sgs_src_range_obj,
            start_list_dtype=sg_node_dtype,
            sample_f=sample_f,
            start_list_d=seeds,
            edge_dir=edge_dir,
            fanout=fanout,
            with_replacement=replace,
        )
    else:
        sampled_df = sample_single_sg(
            sg=sgs_obj,
            start_list_range=sgs_src_range_obj,
            start_list_dtype=sg_node_dtype,
            sample_f=sample_f,
            start_list=seeds,
            fanout=fanout,
            with_replacement=replace,
        )

    # Sampling treats the seeds as sources, so the columns are swapped when
    # edges were traversed in the "in" direction.
    if edge_dir == "in":
        column_map = {"destinations": src_n, "sources": dst_n}
    else:
        column_map = {"sources": src_n, "destinations": dst_n}
    sampled_df = sampled_df.rename(columns=column_map)

    # Transfer data to client
    if isinstance(sampled_df, dask_cudf.DataFrame):
        sampled_df = sampled_df.compute()
    return sampled_df
def create_cudf_series_from_node_ar(node_ar):
    """
    Convert a node array (a DLPack PyCapsule or any array-like accepted by
    cudf.Series) into a cudf.Series.
    """
    is_dlpack_capsule = type(node_ar).__name__ == "PyCapsule"
    if is_dlpack_capsule:
        return cudf.from_dlpack(node_ar)
    return cudf.Series(node_ar)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions | rapidsai_public_repos/cugraph/python/cugraph/cugraph/gnn/dgl_extensions/utils/__init__.py | # Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/dataset.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import yaml
import os
import pandas as pd
from pathlib import Path
from cugraph.structure.graph_classes import Graph
class DefaultDownloadDir:
    """
    Holds the path used as the default download directory for datasets.

    The base directory comes from the RAPIDS_DATASET_ROOT_DIR environment
    variable when set, otherwise from ~/.cugraph/datasets. Several Dataset
    instances typically share a single object of this class so the download
    location can be defined and updated in one place.
    """

    _default_base_dir = Path.home() / ".cugraph/datasets"

    def __init__(self, *, subdir=""):
        """
        Parameters
        ----------
        subdir : str, default ""
            Optional specialized directory appended under the base dir.
        """
        self._subdir = Path(subdir)
        self.reset()

    def reset(self):
        """Re-read the environment and restore the default location."""
        base = os.environ.get("RAPIDS_DATASET_ROOT_DIR", self._default_base_dir)
        self._basedir = Path(base)
        self._path = self._basedir / self._subdir

    @property
    def path(self):
        """Absolute Path of the current download directory."""
        return self._path.absolute()

    @path.setter
    def path(self, new):
        self._path = Path(new)


# Module-level singleton shared by every Dataset instance.
default_download_dir = DefaultDownloadDir()
class Dataset:
    """
    A Dataset Object, used to easily import edgelist data and cuGraph.Graph
    instances.

    Parameters
    ----------
    metadata_yaml_file : yaml file
        The metadata file for the specific graph dataset, which includes
        information on the name, type, url link, data loading format, graph
        properties. Mutually exclusive with csv_file.
    csv_file : str or Path
        Direct path to a csv datafile; mutually exclusive with
        metadata_yaml_file.
    csv_header : int or None
        Row number to use as column names when reading csv_file.
    csv_delim : str, default " "
        Field delimiter used when reading csv_file.
    csv_col_names : list
        Column names for csv_file; required when csv_file is given.
    csv_col_dtypes : list
        Column dtypes for csv_file; required when csv_file is given.
    """

    def __init__(
        self,
        metadata_yaml_file=None,
        csv_file=None,
        csv_header=None,
        csv_delim=" ",
        csv_col_names=None,
        csv_col_dtypes=None,
    ):
        self._metadata_file = None
        self._dl_path = default_download_dir
        self._edgelist = None
        self._path = None

        if metadata_yaml_file is not None and csv_file is not None:
            raise ValueError("cannot specify both metadata_yaml_file and csv_file")

        elif metadata_yaml_file is not None:
            with open(metadata_yaml_file, "r") as file:
                self.metadata = yaml.safe_load(file)
                self._metadata_file = Path(metadata_yaml_file)

        elif csv_file is not None:
            if csv_col_names is None or csv_col_dtypes is None:
                raise ValueError(
                    "csv_col_names and csv_col_dtypes must both be "
                    "not None when csv_file is specified."
                )
            self._path = Path(csv_file)
            if self._path.exists() is False:
                raise FileNotFoundError(csv_file)
            # Synthesize a metadata dict equivalent to what a YAML file
            # would provide.
            self.metadata = {
                "name": self._path.with_suffix("").name,
                "file_type": ".csv",
                "url": None,
                "header": csv_header,
                "delim": csv_delim,
                "col_names": csv_col_names,
                "col_types": csv_col_dtypes,
            }

        else:
            raise ValueError("must specify either metadata_yaml_file or csv_file")

    def __str__(self):
        """
        Use the basename of the meta_data_file the instance was constructed with,
        without any extension, as the string repr.
        """
        # The metadata file is likely to have a more descriptive file name, so
        # use that one first if present.
        # FIXME: this may need to provide a more unique or descriptive string repr
        if self._metadata_file is not None:
            return self._metadata_file.with_suffix("").name
        else:
            return self.get_path().with_suffix("").name

    def __download_csv(self, url):
        """
        Downloads the .csv file from url to the current download path
        (self._dl_path), updates self._path with the full path to the
        downloaded file, and returns the latest value of self._path.
        """
        self._dl_path.path.mkdir(parents=True, exist_ok=True)

        filename = self.metadata["name"] + self.metadata["file_type"]
        if self._dl_path.path.is_dir():
            df = cudf.read_csv(url)
            self._path = self._dl_path.path / filename
            df.to_csv(self._path, index=False)

        else:
            # BUGFIX: previous message concatenated the path and "does not
            # exist" without a separating space.
            raise RuntimeError(
                f"The directory {self._dl_path.path.absolute()} does not exist"
            )
        return self._path

    def unload(self):
        """
        Remove all saved internal objects, forcing them to be re-created when
        accessed.

        NOTE: This will cause calls to get_*() to re-read the dataset file from
        disk. The caller should ensure the file on disk has not moved/been
        deleted/changed.
        """
        self._edgelist = None

    def get_edgelist(self, download=False, reader="cudf"):
        """
        Return an Edgelist.

        Parameters
        ----------
        download : Boolean (default=False)
            Automatically download the dataset from the 'url' location within
            the YAML file.

        reader : 'cudf' or 'pandas' (default='cudf')
            The library used to read a CSV and return an edgelist DataFrame.
        """
        if self._edgelist is None:
            full_path = self.get_path()
            if not full_path.is_file():
                if download:
                    full_path = self.__download_csv(self.metadata["url"])
                else:
                    raise RuntimeError(
                        f"The datafile {full_path} does not"
                        " exist. Try setting download=True"
                        " to download the datafile"
                    )
            header = None
            if isinstance(self.metadata["header"], int):
                header = self.metadata["header"]

            if reader == "cudf":
                self.__reader = cudf.read_csv
            elif reader == "pandas":
                self.__reader = pd.read_csv
            else:
                raise ValueError(
                    "reader must be a module with a read_csv function compatible with \
                        cudf.read_csv"
                )

            self._edgelist = self.__reader(
                filepath_or_buffer=full_path,
                delimiter=self.metadata["delim"],
                names=self.metadata["col_names"],
                # Map each column name to its declared dtype.
                dtype={
                    self.metadata["col_names"][i]: self.metadata["col_types"][i]
                    for i in range(len(self.metadata["col_types"]))
                },
                header=header,
            )

        # Return a copy so callers cannot mutate the cached edgelist.
        return self._edgelist.copy()

    def get_graph(
        self,
        download=False,
        create_using=Graph,
        ignore_weights=False,
        store_transposed=False,
    ):
        """
        Return a Graph object.

        Parameters
        ----------
        download : Boolean (default=False)
            Downloads the dataset from the web.

        create_using: cugraph.Graph (instance or class), optional
            (default=Graph)
            Specify the type of Graph to create. Can pass in an instance to
            create a Graph instance with specified 'directed' attribute.

        ignore_weights : Boolean (default=False)
            Ignores weights in the dataset if True, resulting in an
            unweighted Graph. If False (the default), weights from the
            dataset -if present- will be applied to the Graph. If the
            dataset does not contain weights, the Graph returned will
            be unweighted regardless of ignore_weights.

        store_transposed: Boolean (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms, such as pagerank.
        """
        if self._edgelist is None:
            self.get_edgelist(download)

        if create_using is None:
            G = Graph()
        elif isinstance(create_using, Graph):
            # FIXME: what about BFS if transposed is True
            attrs = {"directed": create_using.is_directed()}
            G = type(create_using)(**attrs)
        elif type(create_using) is type:
            G = create_using()
        else:
            raise TypeError(
                "create_using must be a cugraph.Graph "
                "(or subclass) type or instance, got: "
                f"{type(create_using)}"
            )

        # Use the third column as edge weights only when present and wanted.
        if len(self.metadata["col_names"]) > 2 and not (ignore_weights):
            G.from_cudf_edgelist(
                self._edgelist,
                source=self.metadata["col_names"][0],
                destination=self.metadata["col_names"][1],
                edge_attr=self.metadata["col_names"][2],
                store_transposed=store_transposed,
            )
        else:
            G.from_cudf_edgelist(
                self._edgelist,
                source=self.metadata["col_names"][0],
                destination=self.metadata["col_names"][1],
                store_transposed=store_transposed,
            )
        return G

    def get_path(self):
        """
        Returns the location of the stored dataset file.
        """
        if self._path is None:
            self._path = self._dl_path.path / (
                self.metadata["name"] + self.metadata["file_type"]
            )

        return self._path.absolute()

    def is_directed(self):
        """
        Returns True if the graph is a directed graph.
        """
        return self.metadata["is_directed"]

    def is_multigraph(self):
        """
        Returns True if the graph is a multigraph.
        """
        return self.metadata["is_multigraph"]

    def is_symmetric(self):
        """
        Returns True if the graph is symmetric.
        """
        return self.metadata["is_symmetric"]

    def number_of_nodes(self):
        """
        An alias of number_of_vertices().
        """
        return self.number_of_vertices()

    def number_of_vertices(self):
        """
        Get the number of vertices in the graph.
        """
        return self.metadata["number_of_nodes"]

    def number_of_edges(self):
        """
        Get the number of edges in the graph.
        """
        return self.metadata["number_of_edges"]
def download_all(force=False):
    """
    Looks in the `metadata` directory and downloads all datafiles from the
    URLs provided in each YAML file.

    Parameters
    ----------
    force : Boolean (default=False)
        Overwrite any existing copies of datafiles.
    """
    default_download_dir.path.mkdir(parents=True, exist_ok=True)

    meta_path = Path(__file__).parent.absolute() / "metadata"
    for meta_file in meta_path.iterdir():
        if meta_file.suffix != ".yaml":
            continue
        with open(meta_file, "r") as metafile:
            meta = yaml.safe_load(metafile)
        if "url" not in meta:
            continue
        filename = meta["name"] + meta["file_type"]
        save_to = default_download_dir.path / filename
        # Skip files that already exist unless a re-download is forced.
        if force or not save_to.is_file():
            df = cudf.read_csv(meta["url"])
            df.to_csv(save_to, index=False)
def set_download_dir(path):
    """
    Set the download location for datasets.

    Parameters
    ----------
    path : String or path-like, or None
        Location used to store datafiles. Passing None restores the
        environment/default location.
    """
    if path is None:
        default_download_dir.reset()
    else:
        default_download_dir.path = path
def get_download_dir():
    """
    Return the current dataset download directory as an absolute Path.
    """
    return default_download_dir.path
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/datasets_config.yaml | ---
fetch: "False"
force: "False"
# path where datasets will be downloaded to and stored
download_dir: "datasets"
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
# datasets module
from cugraph.datasets.dataset import (
Dataset,
download_all,
set_download_dir,
get_download_dir,
default_download_dir,
)
from cugraph.datasets import metadata
# metadata path for .yaml files
meta_path = Path(__file__).parent / "metadata"
# Small reference datasets used throughout tests, demos, and notebooks.
cyber = Dataset(meta_path / "cyber.yaml")
dining_prefs = Dataset(meta_path / "dining_prefs.yaml")
dolphins = Dataset(meta_path / "dolphins.yaml")
email_Eu_core = Dataset(meta_path / "email_Eu_core.yaml")
karate = Dataset(meta_path / "karate.yaml")
karate_asymmetric = Dataset(meta_path / "karate_asymmetric.yaml")
karate_disjoint = Dataset(meta_path / "karate_disjoint.yaml")
netscience = Dataset(meta_path / "netscience.yaml")
polbooks = Dataset(meta_path / "polbooks.yaml")
small_line = Dataset(meta_path / "small_line.yaml")
small_tree = Dataset(meta_path / "small_tree.yaml")
toy_graph = Dataset(meta_path / "toy_graph.yaml")
toy_graph_undirected = Dataset(meta_path / "toy_graph_undirected.yaml")

# Benchmarking datasets: be mindful of memory usage
# 250 MB
soc_livejournal = Dataset(meta_path / "soc-livejournal1.yaml")
# 965 MB
cit_patents = Dataset(meta_path / "cit-patents.yaml")
# 1.8 GB
europe_osm = Dataset(meta_path / "europe_osm.yaml")
# 1.5 GB
hollywood = Dataset(meta_path / "hollywood.yaml")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/cyber.yaml | name: cyber
file_type: .csv
description:
IP edge pairs of a cyber data set from the University of New South Wales.
author: Moustafa, Nour, and Jill Slay
refs:
Moustafa, Nour. Designing an online and reliable statistical anomaly detection
framework for dealing with large high-speed network traffic. Diss. University
of New South Wales, Canberra, Australia, 2017.
delim: ","
header: 0
col_names:
- idx
- srcip
- dstip
col_types:
- int32
- str
- str
has_loop: false
is_directed: true
is_multigraph: false
is_symmetric: false
number_of_edges: 2546575
number_of_nodes: 706529
url: https://data.rapids.ai/cugraph/datasets/cyber.csv
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/polbooks.yaml | name: polbooks
file_type: .csv
description:
  A network of books about U.S. politics published close to the 2004 U.S. presidential election, and sold by Amazon.com. Edges between books represent frequent co-purchasing of those books by the same buyers.
author: V. Krebs
refs:
V. Krebs, "The political books network", unpublished, https://doi.org/10.2307/40124305 [@sci-hub]
delim: " "
header: None
col_names:
- src
- dst
- wgt
col_types:
- int32
- int32
- float32
has_loop: false
is_directed: true
is_multigraph: false
is_symmetric: true
number_of_edges: 882
number_of_nodes: 105
url: https://data.rapids.ai/cugraph/datasets/polbooks.csv
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/small_tree.yaml | name: small_tree
file_type: .csv
description:
The `small_tree` dataset was created by Nvidia for testing/demonstration
purposes, and consists of a small (9 nodes) directed tree.
author: Nvidia
refs: null
delim: " "
header: None
col_names:
- src
- dst
- wgt
col_types:
- int32
- int32
- float32
has_loop: false
is_directed: true
is_multigraph: false
is_symmetric: false
number_of_edges: 11
number_of_nodes: 9
url: https://data.rapids.ai/cugraph/datasets/small_tree.csv
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/small_line.yaml | name: small_line
file_type: .csv
description:
The `small_line` dataset was created by Nvidia for testing and demonstration
purposes, and consists of a small (10 nodes) path/linear graph.
author: Nvidia
refs: null
delim: " "
header: None
col_names:
- src
- dst
- wgt
col_types:
- int32
- int32
- float32
has_loop: false
is_directed: false
is_multigraph: false
is_symmetric: true
number_of_edges: 9
number_of_nodes: 10
url: https://data.rapids.ai/cugraph/datasets/small_line.csv
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/hollywood.yaml | name: hollywood
file_type: .csv
description:
A graph of movie actors where vertices are actors, and two actors are
joined by an edge whenever they appeared in a movie together.
author: Laboratory for Web Algorithmics (LAW)
refs:
The WebGraph Framework I Compression Techniques, Paolo Boldi
and Sebastiano Vigna, Proc. of the Thirteenth International
World Wide Web Conference (WWW 2004), 2004, Manhattan, USA,
pp. 595--601, ACM Press.
delim: " "
header: None
col_names:
- src
- dst
col_types:
- int32
- int32
has_loop: false
is_directed: false
is_multigraph: false
is_symmetric: true
number_of_edges: 57515616
number_of_nodes: 1139905
url: https://data.rapids.ai/cugraph/datasets/hollywood.csv | 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets | rapidsai_public_repos/cugraph/python/cugraph/cugraph/datasets/metadata/dolphins.yaml | name: dolphins
file_type: .csv
description: An undirected social network of frequent associations between 62 dolphins in a community living off Doubtful Sound, New Zealand, as compiled by Lusseau et al. (2003).
author:
- D. Lusseau
- K. Schneider
- O. J. Boisseau
- P. Haase
- E. Slooten
- S. M. Dawson
refs:
D. Lusseau, K. Schneider, O. J. Boisseau, P. Haase, E. Slooten, and S. M. Dawson,
The bottlenose dolphin community of Doubtful Sound features a large proportion of
long-lasting associations, Behavioral Ecology and Sociobiology 54, 396-405 (2003).
delim: " "
header: None
col_names:
- src
- dst
- wgt
col_types:
- int32
- int32
- float32
has_loop: false
is_directed: false
is_multigraph: false
is_symmetric: true
number_of_edges: 159
number_of_nodes: 62
url: https://data.rapids.ai/cugraph/datasets/dolphins.csv
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.