repo_id stringlengths 21 96 | file_path stringlengths 31 155 | content stringlengths 1 92.9M | __index_level_0__ int64 0 0 |
|---|---|---|---|
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/sampling/node2vec.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pylibcugraph import (
ResourceHandle,
node2vec as pylibcugraph_node2vec,
)
from cugraph.utilities import ensure_cugraph_obj_for_nx
import cudf
def node2vec(G, start_vertices, max_depth=1, compress_result=True, p=1.0, q=1.0):
    """
    Computes random walks for each node in 'start_vertices', under the
    node2vec sampling framework.

    References
    ----------
    A Grover, J Leskovec: node2vec: Scalable Feature Learning for Networks,
    Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge
    Discovery and Data Mining, https://arxiv.org/abs/1607.00653

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        The graph can be either directed or undirected.
        Weights in the graph are ignored.

    start_vertices: int or list or cudf.Series or cudf.DataFrame
        A single node or a list or a cudf.Series of nodes from which to run
        the random walks. In case of multi-column vertices it should be
        a cudf.DataFrame. Only supports int32 currently.

    max_depth: int, optional (default=1)
        The maximum depth of the random walks. If not specified, the maximum
        depth is set to 1.

    compress_result: bool, optional (default=True)
        If True, coalesced paths are returned with a sizes array with offsets.
        Otherwise padded paths are returned with an empty sizes array.

    p: float, optional (default=1.0, [0 < p])
        Return factor, which represents the likelihood of backtracking to
        a previous node in the walk. A higher value makes it less likely to
        sample a previously visited node, while a lower value makes it more
        likely to backtrack, making the walk "local". A positive float.

    q: float, optional (default=1.0, [0 < q])
        In-out factor, which represents the likelihood of visiting nodes
        closer or further from the outgoing node. If q > 1, the random walk
        is likelier to visit nodes closer to the outgoing node. If q < 1, the
        random walk is likelier to visit nodes further from the outgoing node.
        A positive float.

    Returns
    -------
    vertex_paths : cudf.Series or cudf.DataFrame
        Series containing the vertices of edges/paths in the random walk.

    edge_weight_paths: cudf.Series
        Series containing the edge weights of edges represented by the
        returned vertex_paths

    sizes: int or cudf.Series
        The path size or sizes in case of coalesced paths.

    Examples
    --------
    >>> import numpy as np
    >>> import cudf
    >>> import cugraph
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> start_vertices = cudf.Series([0, 2], dtype=np.int32)
    >>> paths, weights, path_sizes = cugraph.node2vec(G, start_vertices, 3,
    ...                                               True, 0.8, 0.5)
    """
    if (not isinstance(max_depth, int)) or (max_depth < 1):
        raise ValueError(
            f"'max_depth' must be a positive integer, " f"got: {max_depth}"
        )
    if not isinstance(compress_result, bool):
        raise ValueError(
            f"'compress_result' must be a bool, " f"got: {compress_result}"
        )
    if (not isinstance(p, float)) or (p <= 0.0):
        raise ValueError(f"'p' must be a positive float, got: {p}")
    if (not isinstance(q, float)) or (q <= 0.0):
        raise ValueError(f"'q' must be a positive float, got: {q}")

    G, _ = ensure_cugraph_obj_for_nx(G)

    # Normalize scalar/list inputs to a cudf.Series of int32 vertex IDs.
    if isinstance(start_vertices, int):
        start_vertices = [start_vertices]
    if isinstance(start_vertices, list):
        start_vertices = cudf.Series(start_vertices, dtype="int32")

    # FIXME: Verify if this condition still holds
    if isinstance(start_vertices, cudf.DataFrame):
        # Multi-column vertices: DataFrames have per-column `.dtypes`
        # (accessing `.dtype` would raise AttributeError), so check every
        # column individually.
        if not all(dtype == "int32" for dtype in start_vertices.dtypes):
            raise ValueError(
                f"'start_vertices' must have int32 values, "
                f"got: {list(start_vertices.dtypes)}"
            )
    elif start_vertices.dtype != "int32":
        raise ValueError(
            f"'start_vertices' must have int32 values, "
            f"got: {start_vertices.dtype}"
        )

    if G.renumbered is True:
        # Translate the caller's external vertex IDs to internal IDs.
        if isinstance(start_vertices, cudf.DataFrame):
            start_vertices = G.lookup_internal_vertex_id(
                start_vertices, start_vertices.columns
            )
        else:
            start_vertices = G.lookup_internal_vertex_id(start_vertices)

    vertex_set, edge_set, sizes = pylibcugraph_node2vec(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        seed_array=start_vertices,
        max_depth=max_depth,
        compress_result=compress_result,
        p=p,
        q=q,
    )
    vertex_set = cudf.Series(vertex_set)
    edge_set = cudf.Series(edge_set)
    sizes = cudf.Series(sizes)

    if G.renumbered:
        # Map internal vertex IDs in the walk back to the original IDs,
        # preserving the path order.
        df_ = cudf.DataFrame()
        df_["vertex_set"] = vertex_set
        df_ = G.unrenumber(df_, "vertex_set", preserve_order=True)
        vertex_set = cudf.Series(df_["vertex_set"])

    return vertex_set, edge_set, sizes
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/generators/rmat.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dask.distributed import default_client, wait
import dask_cudf
from cugraph.dask.comms import comms as Comms
import cudf
import numpy as np
import cupy as cp
import cugraph
from pylibcugraph import generate_rmat_edgelist as pylibcugraph_generate_rmat_edgelist
from pylibcugraph import generate_rmat_edgelists as pylibcugraph_generate_rmat_edgelists
from pylibcugraph import ResourceHandle
_graph_types = [cugraph.Graph, cugraph.MultiGraph]
def _ensure_args_rmat(
scale=None,
num_edges=None,
a=None,
b=None,
c=None,
n_edgelists=None,
min_scale=None,
max_scale=None,
edge_factor=None,
size_distribution=None,
edge_distribution=None,
seed=None,
clip_and_flip=None,
scramble_vertex_ids=None,
include_edge_weights=None,
minimum_weight=None,
maximum_weight=None,
dtype=None,
include_edge_ids=None,
include_edge_types=None,
min_edge_type_value=None,
max_edge_type_value=None,
create_using=None,
mg=None,
multi_rmat=False,
):
"""
Ensures the args passed in are usable for the rmat() or multi rmat() API,
raises the appropriate exception if incorrect, else returns None.
"""
if create_using is not None:
if isinstance(create_using, cugraph.Graph):
directed = create_using.is_directed()
if mg and not directed:
raise TypeError(
"Only directed cugraph.Graph and None "
"are supported types for `create_using` "
"and `directed` for multi-GPU R-MAT"
)
elif create_using not in _graph_types:
raise TypeError(
"create_using must be a cugraph.Graph "
"(or subclass) type or instance, got: "
f"{type(create_using)}"
)
if not isinstance(seed, int):
raise TypeError("'seed' must be an int")
if include_edge_weights:
if not isinstance(include_edge_weights, bool):
raise TypeError("'include_edge_weights' must be a bool")
if maximum_weight is None or minimum_weight is None:
raise ValueError(
"'maximum_weight' and 'minimum_weight' must not be 'None' "
"if 'include_edge_weights' is True"
)
if dtype not in [
np.float32,
np.float64,
cp.float32,
cp.float64,
"float32",
"float64",
]:
raise ValueError(
"dtype must be either numpy or cupy 'float32' or 'float64' if "
"'include_edge_weights' is True."
)
if include_edge_ids:
if not isinstance(include_edge_ids, bool):
raise TypeError("'include_edge_ids' must be a bool")
if include_edge_types:
if not isinstance(include_edge_types, bool):
raise TypeError("'include_edge_types' must be a bool")
if min_edge_type_value is None and max_edge_type_value is None:
raise ValueError(
"'min_edge_type_value' and 'max_edge_type_value' must not be 'None' "
"if 'include_edge_types' is True"
)
if multi_rmat:
if not isinstance(n_edgelists, int):
raise TypeError("'n_edgelists' must be an int")
if not isinstance(min_scale, int):
raise TypeError("'min_scale' must be an int")
if not isinstance(max_scale, int):
raise TypeError("'max_scale' must be an int")
if not isinstance(edge_factor, int):
raise TypeError("'edge_factor' must be an int")
if size_distribution not in [0, 1]:
raise TypeError("'size_distribution' must be either 0 or 1")
if edge_distribution not in [0, 1]:
raise TypeError("'edge_distribution' must be either 0 or 1")
else:
if not isinstance(scale, int):
raise TypeError("'scale' must be an int")
if not isinstance(num_edges, int):
raise TypeError("'num_edges' must be an int")
if a + b + c > 1:
raise ValueError("a + b + c should be non-negative and no larger than 1.0")
if not isinstance(clip_and_flip, bool):
raise TypeError("'clip_and_flip' must be a bool")
if not isinstance(scramble_vertex_ids, bool):
raise TypeError("'scramble_vertex_ids' must be a bool")
def _sg_rmat(
    scale,
    num_edges,
    a,
    b,
    c,
    seed,
    clip_and_flip,
    scramble_vertex_ids,
    include_edge_weights,
    minimum_weight,
    maximum_weight,
    dtype,
    include_edge_ids,
    include_edge_types,
    min_edge_type_value,
    max_edge_type_value,
    create_using=cugraph.Graph,
):
    """
    Calls RMAT on a single GPU and uses the resulting cuDF DataFrame
    to initialize and return a cugraph Graph object specified with
    create_using. If create_using is None, returns the edgelist df as-is.
    """
    # FIXME: add deprecation warning for the parameter 'seed' and rename it
    # 'random_state'
    random_state = seed
    src, dst, weights, edge_id, edge_type = pylibcugraph_generate_rmat_edgelist(
        ResourceHandle(),
        random_state,
        scale,
        num_edges,
        a,
        b,
        c,
        clip_and_flip,
        scramble_vertex_ids,
        include_edge_weights,
        minimum_weight,
        maximum_weight,
        dtype,
        include_edge_ids,
        include_edge_types,
        min_edge_type_value,
        max_edge_type_value,
        False,  # multi_gpu: this is the single-GPU path
    )

    edgelist_df = cudf.DataFrame()
    edgelist_df["src"] = src
    edgelist_df["dst"] = dst

    # Attach each requested optional column, and rebind the corresponding
    # variable to its column name so it can be forwarded to
    # from_cudf_edgelist() below.
    if include_edge_weights:
        edgelist_df["weights"] = weights
        weights = "weights"
    if include_edge_ids:
        edgelist_df["edge_id"] = edge_id
        edge_id = "edge_id"
    if include_edge_types:
        edgelist_df["edge_type"] = edge_type
        edge_type = "edge_type"

    # No graph type requested: hand back the raw edgelist DataFrame.
    if create_using is None:
        return edgelist_df

    if isinstance(create_using, cugraph.Graph):
        # Instantiate the same concrete class with the same directedness.
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif create_using in _graph_types:
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )

    G.from_cudf_edgelist(
        edgelist_df,
        source="src",
        destination="dst",
        weight=weights,
        edge_id=edge_id,
        edge_type=edge_type,
        renumber=False,
    )
    return G
def convert_to_cudf(cp_arrays):
    """
    Build a cudf DataFrame from the (src, dst, weights, ids, types) arrays
    produced by one RMAT worker call. Optional arrays that are None are
    simply left out of the resulting frame.
    """
    srcs, dsts, wgts, ids, types = cp_arrays
    df = cudf.DataFrame()
    df["src"] = srcs
    df["dst"] = dsts
    # Only materialize the optional columns that were actually generated.
    for column_name, column_data in (
        ("weights", wgts),
        ("edge_id", ids),
        ("edge_type", types),
    ):
        if column_data is not None:
            df[column_name] = column_data
    return df
def _mg_rmat(
    scale,
    num_edges,
    a,
    b,
    c,
    seed,
    clip_and_flip,
    scramble_vertex_ids,
    include_edge_weights,
    minimum_weight,
    maximum_weight,
    dtype,
    include_edge_ids,
    include_edge_types,
    min_edge_type_value,
    max_edge_type_value,
    create_using=cugraph.Graph,
):
    """
    Calls RMAT on multiple GPUs and uses the resulting Dask cuDF DataFrame to
    initialize and return a cugraph Graph object specified with create_using.
    If create_using is None, returns the Dask DataFrame edgelist as-is.
    seed is used as the initial seed for the first worker used (worker 0), then
    each subsequent worker will receive seed+<worker num> as the seed value.
    """
    client = default_client()
    worker_list = list(client.scheduler_info()["workers"].keys())
    num_workers = len(worker_list)
    # Split the requested edge count evenly across the workers; each worker
    # generates only its share.
    num_edges_list = _calc_num_edges_per_worker(num_workers, num_edges)
    result = []
    for (i, worker_num_edges) in enumerate(num_edges_list):
        # Give each worker a distinct seed so the per-worker edgelists differ.
        unique_worker_seed = seed + i
        future = client.submit(
            _call_rmat,
            Comms.get_session_id(),
            scale,
            worker_num_edges,
            a,
            b,
            c,
            unique_worker_seed,
            clip_and_flip,
            scramble_vertex_ids,
            include_edge_weights,
            minimum_weight,
            maximum_weight,
            dtype,
            include_edge_ids,
            include_edge_types,
            min_edge_type_value,
            max_edge_type_value,
            workers=worker_list[i],  # pin this chunk to a specific worker
        )
        result.append(future)
    wait(result)
    # Convert each worker's raw array tuple into a cudf DataFrame, still on
    # the workers, then assemble them into one distributed DataFrame.
    cudf_result = [client.submit(convert_to_cudf, cp_arrays) for cp_arrays in result]
    wait(cudf_result)
    ddf = dask_cudf.from_delayed(cudf_result)
    # No graph type requested: hand back the raw distributed edgelist.
    if create_using is None:
        return ddf
    if isinstance(create_using, cugraph.Graph):
        # Instantiate the same concrete class with the same directedness.
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif create_using in _graph_types:
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    # Forward whichever optional columns convert_to_cudf() produced.
    weights = None
    edge_id = None
    edge_type = None
    if "weights" in ddf.columns:
        weights = "weights"
    if "edge_id" in ddf.columns:
        edge_id = "edge_id"
    if "edge_type" in ddf.columns:
        edge_type = "edge_type"
    G.from_dask_cudf_edgelist(
        ddf,
        source="src",
        destination="dst",
        weight=weights,
        edge_id=edge_id,
        edge_type=edge_type,
    )
    return G
def _call_rmat(
    sID,
    scale,
    num_edges_for_worker,
    a,
    b,
    c,
    random_state,
    clip_and_flip,
    scramble_vertex_ids,
    include_edge_weights,
    minimum_weight,
    maximum_weight,
    dtype,
    include_edge_ids,
    include_edge_types,
    min_edge_type_value,
    max_edge_type_value,
):
    """
    Callable passed to dask client.submit calls that extracts the individual
    worker handle based on the dask session ID
    """
    # Runs on a dask worker: resolve this worker's handle from the comms
    # session ID, then generate only this worker's share of the edges.
    multi_gpu = True
    return pylibcugraph_generate_rmat_edgelist(
        ResourceHandle(Comms.get_handle(sID).getHandle()),
        random_state,
        scale,
        num_edges_for_worker,
        a,
        b,
        c,
        clip_and_flip,
        scramble_vertex_ids,
        include_edge_weights,
        minimum_weight,
        maximum_weight,
        dtype,
        include_edge_ids,
        include_edge_types,
        min_edge_type_value,
        max_edge_type_value,
        multi_gpu,
    )
def _calc_num_edges_per_worker(num_workers, num_edges):
"""
Returns a list of length num_workers with the individual number of edges
each worker should generate. The sum of all edges in the list is num_edges.
"""
L = []
w = num_edges // num_workers
r = num_edges % num_workers
for i in range(num_workers):
if i < r:
L.append(w + 1)
else:
L.append(w)
return L
###############################################################################
def rmat(
    scale,
    num_edges,
    a=0.57,
    b=0.19,
    c=0.19,
    seed=42,
    clip_and_flip=False,
    scramble_vertex_ids=False,
    include_edge_weights=False,
    minimum_weight=None,
    maximum_weight=None,
    dtype=None,
    include_edge_ids=False,
    include_edge_types=False,
    min_edge_type_value=None,
    max_edge_type_value=None,
    create_using=cugraph.Graph,
    mg=False,
):
    """
    Generate a Graph object using a Recursive MATrix (R-MAT) graph generation
    algorithm.
    Parameters
    ----------
    scale : int
        Scale factor to set the number of vertices in the graph. Vertex IDs have
        values in [0, V), where V = 1 << 'scale'.
    num_edges : int
        Number of edges to generate
    a : float, optional (default=0.57)
        Probability of the edge being in the first partition
        The Graph 500 spec sets this value to 0.57.
    b : float, optional (default=0.19)
        Probability of the edge being in the second partition
        The Graph 500 spec sets this value to 0.19.
    c : float, optional (default=0.19)
        Probability of the edge being in the third partition
        The Graph 500 spec sets this value to 0.19.
    seed : int, optional (default=42)
        Seed value for the random number generator.
    clip_and_flip : bool, optional (default=False)
        Flag controlling whether to generate edges only in the lower triangular
        part (including the diagonal) of the graph adjacency matrix
        (if set to True) or not (if set to False).
    scramble_vertex_ids : bool, optional (default=False)
        Flag controlling whether to scramble vertex ID bits (if set to True)
        or not (if set to False); scrambling vertex ID bits breaks
        correlation between vertex ID values and vertex degrees.
    include_edge_weights : bool, optional (default=False)
        Flag controlling whether to generate edges with weights
        (if set to True) or not (if set to False).
    minimum_weight : float
        Minimum weight value to generate if 'include_edge_weights' is True
        otherwise, this parameter is ignored.
    maximum_weight : float
        Maximum weight value to generate if 'include_edge_weights' is True
        otherwise, this parameter is ignored.
    dtype : numpy.float32, numpy.float64, cupy.float32, cupy.float64,
        "float32", "float64"
        The type of weight to generate which is ignored unless
        include_edge_weights is True.
    include_edge_ids : bool, optional (default=False)
        Flag controlling whether to generate edges with ids
        (if set to True) or not (if set to False).
    include_edge_types : bool, optional (default=False)
        Flag controlling whether to generate edges with types
        (if set to True) or not (if set to False).
    min_edge_type_value : int
        Minimum edge type to generate if 'include_edge_types' is True
        otherwise, this parameter is ignored.
    max_edge_type_value : int
        Maximum edge type to generate if 'include_edge_types' is True
        otherwise, this parameter is ignored.
    create_using : cugraph Graph type or None The graph type to construct
        containing the generated edges and vertices. If None is specified, the
        edgelist cuDF DataFrame (or dask_cudf DataFrame for MG) is returned
        as-is. This is useful for benchmarking Graph construction steps that
        require raw data that includes potential self-loops, isolated vertices,
        and duplicated edges. Default is cugraph.Graph.
    mg : bool, optional (default=False)
        If True, R-MAT generation occurs across multiple GPUs. If False, only a
        single GPU is used. Default is False (single-GPU).
    Returns
    -------
    instance of cugraph.Graph or cudf or dask_cudf DataFrame
    Examples
    --------
    >>> import cugraph
    >>> from cugraph.generators import rmat
    >>> scale = 10
    >>> edgefactor = 16
    >>> df = rmat(
    ...     scale,
    ...     (2**scale)*edgefactor,
    ...     0.57,
    ...     0.19,
    ...     0.19,
    ...     seed=42,
    ...     clip_and_flip=False,
    ...     scramble_vertex_ids=True,
    ...     create_using=None,
    ...     mg=False
    ... )
    """
    # Validate all arguments up front; raises TypeError/ValueError on bad input.
    _ensure_args_rmat(
        scale=scale,
        num_edges=num_edges,
        a=a,
        b=b,
        c=c,
        seed=seed,
        clip_and_flip=clip_and_flip,
        scramble_vertex_ids=scramble_vertex_ids,
        include_edge_weights=include_edge_weights,
        minimum_weight=minimum_weight,
        maximum_weight=maximum_weight,
        dtype=dtype,
        include_edge_ids=include_edge_ids,
        include_edge_types=include_edge_types,
        min_edge_type_value=min_edge_type_value,
        max_edge_type_value=max_edge_type_value,
        create_using=create_using,
        mg=mg,
    )
    # Dispatch to the multi-GPU or single-GPU implementation.
    if mg:
        return _mg_rmat(
            scale,
            num_edges,
            a,
            b,
            c,
            seed,
            clip_and_flip,
            scramble_vertex_ids,
            include_edge_weights,
            minimum_weight,
            maximum_weight,
            dtype,
            include_edge_ids,
            include_edge_types,
            min_edge_type_value,
            max_edge_type_value,
            create_using,
        )
    else:
        return _sg_rmat(
            scale,
            num_edges,
            a,
            b,
            c,
            seed,
            clip_and_flip,
            scramble_vertex_ids,
            include_edge_weights,
            minimum_weight,
            maximum_weight,
            dtype,
            include_edge_ids,
            include_edge_types,
            min_edge_type_value,
            max_edge_type_value,
            create_using,
        )
def multi_rmat(
    n_edgelists,
    min_scale,
    max_scale,
    edge_factor,
    size_distribution,
    edge_distribution,
    seed=42,
    clip_and_flip=False,
    scramble_vertex_ids=False,
    include_edge_weights=False,
    minimum_weight=None,
    maximum_weight=None,
    dtype=None,
    include_edge_ids=False,
    include_edge_types=False,
    min_edge_type_value=None,
    max_edge_type_value=None,
    mg=False,
):
    """
    Generate multiple Graph objects using a Recursive MATrix (R-MAT) graph
    generation algorithm.
    Parameters
    ----------
    n_edgelists : int
        Number of edge lists (graphs) to generate.
    min_scale : int
        Scale factor to set the minimum number of vertices in the graph.
    max_scale : int
        Scale factor to set the maximum number of vertices in the graph.
    edge_factor : int
        Average number of edges per vertex to generate
    size_distribution : int
        Distribution of the graph sizes, impacts the scale parameter of the
        R-MAT generator.
        '0' for POWER_LAW distribution and '1' for UNIFORM distribution.
    edge_distribution : int
        Edges distribution for each graph, impacts how R-MAT parameters
        a,b,c,d, are set.
        '0' for POWER_LAW distribution and '1' for UNIFORM distribution.
    seed : int
        Seed value for the random number generator.
    clip_and_flip : bool, optional (default=False)
        Flag controlling whether to generate edges only in the lower triangular
        part (including the diagonal) of the graph adjacency matrix
        (if set to True) or not (if set to False).
    scramble_vertex_ids : bool
        Flag controlling whether to scramble vertex ID bits (if set to True)
        or not (if set to False); scrambling vertex ID bits breaks correlation
        between vertex ID values and vertex degrees.
    include_edge_weights : bool, optional (default=False)
        Flag controlling whether to generate edges with weights
        (if set to True) or not (if set to False).
    minimum_weight : float
        Minimum weight value to generate if 'include_edge_weights' is True
        otherwise, this parameter is ignored.
    maximum_weight : float
        Maximum weight value to generate if 'include_edge_weights' is True
        otherwise, this parameter is ignored.
    include_edge_ids : bool, optional (default=False)
        Flag controlling whether to generate edges with ids
        (if set to True) or not (if set to False).
    include_edge_types : bool, optional (default=False)
        Flag controlling whether to generate edges with types
        (if set to True) or not (if set to False).
    min_edge_type_value : int
        Minimum edge type to generate if 'include_edge_types' is True
        otherwise, this parameter is ignored.
    max_edge_type_value : int
        Maximum edge type to generate if 'include_edge_types' is True
        otherwise, this parameter is ignored.
    dtype : numpy.float32, numpy.float64, cupy.float32, cupy.float64,
        "float32", "float64"
        The type of weight to generate which is ignored unless
        include_edge_weights is True.
    mg : bool, optional (default=False)
        If True, R-MATs generation occurs across multiple GPUs. If False, only a
        single GPU is used. Default is False (single-GPU)
    # FIXME: multi GPU RMATs generation not supported yet.
    Returns
    -------
    list of cugraph.Graph instances
    """
    # Validate all arguments up front (multi_rmat=True selects the
    # multi-edgelist argument checks).
    _ensure_args_rmat(
        n_edgelists=n_edgelists,
        min_scale=min_scale,
        max_scale=max_scale,
        edge_factor=edge_factor,
        size_distribution=size_distribution,
        edge_distribution=edge_distribution,
        seed=seed,
        include_edge_weights=include_edge_weights,
        minimum_weight=minimum_weight,
        maximum_weight=maximum_weight,
        dtype=dtype,
        include_edge_ids=include_edge_ids,
        include_edge_types=include_edge_types,
        min_edge_type_value=min_edge_type_value,
        max_edge_type_value=max_edge_type_value,
        multi_rmat=True,
        clip_and_flip=clip_and_flip,
        scramble_vertex_ids=scramble_vertex_ids,
    )
    edgelists = pylibcugraph_generate_rmat_edgelists(
        ResourceHandle(),
        seed,
        n_edgelists,
        min_scale,
        max_scale,
        edge_factor,
        size_distribution,
        edge_distribution,
        clip_and_flip,
        scramble_vertex_ids,
        include_edge_weights,
        minimum_weight,
        maximum_weight,
        dtype,
        include_edge_ids,
        include_edge_types,
        min_edge_type_value,
        max_edge_type_value,
        mg,
    )
    # Build one cudf DataFrame per generated edgelist; optional arrays that
    # are None are skipped.
    dfs = []
    for edgelist in edgelists:
        src, dst, weights, edge_id, edge_type = edgelist
        df = cudf.DataFrame()
        df["src"] = src
        df["dst"] = dst
        if weights is not None:
            df["weights"] = weights
            weights = "weights"
        if edge_id is not None:
            df["edge_id"] = edge_id
            edge_id = "edge_id"
        if edge_type is not None:
            df["edge_type"] = edge_type
            edge_type = "edge_type"
        dfs.append(df)
    # NOTE(review): weights/edge_id/edge_type are rebound to column-name
    # strings inside the loop above and then reused for every graph below;
    # this relies on all edgelists having the same optional columns (which
    # holds since the include_* flags apply to all of them) — confirm.
    list_G = []
    for df in dfs:
        G = cugraph.Graph()
        G.from_cudf_edgelist(
            df,
            source="src",
            destination="dst",
            weight=weights,
            edge_id=edge_id,
            edge_type=edge_type,
        )
        list_G.append(G)
    return list_G
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/generators/__init__.py | # Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .rmat import rmat, multi_rmat
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/cores/core_number.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
import cudf
from pylibcugraph import core_number as pylibcugraph_core_number, ResourceHandle
def core_number(G, degree_type="bidirectional"):
    """
    Compute the core numbers for the nodes of the graph G. A k-core of a
    graph is a maximal subgraph that contains nodes of degree k or more.
    A node has a core number of k if it belongs to a k-core but not to a
    (k+1)-core. This call does not support a graph with self-loops and
    parallel edges.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        The graph should contain undirected edges where undirected edges are
        represented as directed edges in both directions. While this graph
        can contain edge weights, they don't participate in the calculation
        of the core numbers. The current implementation only supports
        undirected graphs.

    degree_type: str, (default="bidirectional")
        This option determines if the core number computation should be based
        on input, output, or both directed edges, with valid values being
        "incoming", "outgoing", and "bidirectional" respectively.

    Returns
    -------
    df : cudf.DataFrame or python dictionary (in NetworkX input)
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding core number values.

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['core_number'] : cudf.Series
            Contains the core number of vertices

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> df = cugraph.core_number(G)
    """
    G, input_was_nx = ensure_cugraph_obj_for_nx(G)

    if G.is_directed():
        raise ValueError("input graph must be undirected")

    if degree_type not in ("incoming", "outgoing", "bidirectional"):
        raise ValueError(
            f"'degree_type' must be either incoming, "
            f"outgoing or bidirectional, got: {degree_type}"
        )

    vertices, core_numbers = pylibcugraph_core_number(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        degree_type=degree_type,
        do_expensive_check=False,
    )

    result = cudf.DataFrame({"vertex": vertices, "core_number": core_numbers})

    if G.renumbered:
        # Map internal vertex IDs back to the caller's original IDs.
        result = G.unrenumber(result, "vertex")

    if input_was_nx:
        # NetworkX callers get a plain python dict instead of a DataFrame.
        result = df_score_to_dictionary(result, "core_number")

    return result
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/cores/k_core.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from pylibcugraph import (
core_number as pylibcugraph_core_number,
k_core as pylibcugraph_k_core,
ResourceHandle,
)
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
cugraph_to_nx,
)
def _call_plc_core_number(G, degree_type):
    """
    Run the pylibcugraph core_number implementation on G and package the
    result as a cudf DataFrame with 'vertex' and 'core_number' columns.
    """
    vertices, core_numbers = pylibcugraph_core_number(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        degree_type=degree_type,
        do_expensive_check=False,
    )
    result = cudf.DataFrame()
    result["vertex"] = vertices
    result["core_number"] = core_numbers
    return result
def k_core(G, k=None, core_number=None, degree_type="bidirectional"):
    """
    Compute the k-core of the graph G based on the out degree of its nodes. A
    k-core of a graph is a maximal subgraph that contains nodes of degree k or
    more. This call does not support a graph with self-loops and parallel
    edges.
    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information. The graph
        should contain undirected edges where undirected edges are represented
        as directed edges in both directions. While this graph can contain edge
        weights, they don't participate in the calculation of the k-core.
        The current implementation only supports undirected graphs.
    k : int, optional (default=None)
        Order of the core. This value must not be negative. If set to None, the
        main core is returned.
    degree_type: str, (default="bidirectional")
        This option determines if the core number computation should be based
        on input, output, or both directed edges, with valid values being
        "incoming", "outgoing", and "bidirectional" respectively.
    core_number : cudf.DataFrame, optional (default=None)
        Precomputed core number of the nodes of the graph G containing two
        cudf.Series of size V: the vertex identifiers and the corresponding
        core number values. If set to None, the core numbers of the nodes are
        calculated internally.
        core_number['vertex'] : cudf.Series
            Contains the vertex identifiers
        core_number['values'] : cudf.Series
            Contains the core number of vertices
    Returns
    -------
    KCoreGraph : cuGraph.Graph
        K Core of the input graph
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> KCoreGraph = cugraph.k_core(G)
    """
    G, isNx = ensure_cugraph_obj_for_nx(G)
    if degree_type not in ["incoming", "outgoing", "bidirectional"]:
        raise ValueError(
            f"'degree_type' must be either incoming, "
            f"outgoing or bidirectional, got: {degree_type}"
        )
    # The result graph is the same concrete type as the input graph.
    mytype = type(G)
    KCoreGraph = mytype()
    if G.is_directed():
        raise ValueError("G must be an undirected Graph instance")
    if core_number is None:
        # No precomputed core numbers supplied; compute them now.
        core_number = _call_plc_core_number(G, degree_type=degree_type)
    else:
        if G.renumbered:
            # Caller-supplied core numbers use external vertex IDs; map them
            # to the graph's internal IDs first.
            if len(G.renumber_map.implementation.col_names) > 1:
                # Multi-column vertices: all columns except the trailing
                # core-number column identify the vertex.
                cols = core_number.columns[:-1].to_list()
            else:
                cols = "vertex"
            core_number = G.add_internal_vertex_id(core_number, "vertex", cols)
    # pylibcugraph expects the core numbers in a column named "values".
    core_number = core_number.rename(columns={"core_number": "values"})
    if k is None:
        # Default to the main core (the largest core number present).
        k = core_number["values"].max()
    src_vertices, dst_vertices, weights = pylibcugraph_k_core(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        degree_type=degree_type,
        k=k,
        core_result=core_number,
        do_expensive_check=False,
    )
    k_core_df = cudf.DataFrame()
    k_core_df["src"] = src_vertices
    k_core_df["dst"] = dst_vertices
    k_core_df["weight"] = weights
    if G.renumbered:
        # Map internal IDs back to the caller's original vertex IDs; the
        # returned names account for possibly multi-column vertices.
        k_core_df, src_names = G.unrenumber(k_core_df, "src", get_column_names=True)
        k_core_df, dst_names = G.unrenumber(k_core_df, "dst", get_column_names=True)
    else:
        src_names = k_core_df.columns[0]
        dst_names = k_core_df.columns[1]
    if G.edgelist.weights:
        # The input graph was weighted; carry weights into the k-core graph.
        KCoreGraph.from_cudf_edgelist(
            k_core_df, source=src_names, destination=dst_names, edge_attr="weight"
        )
    else:
        KCoreGraph.from_cudf_edgelist(
            k_core_df,
            source=src_names,
            destination=dst_names,
        )
    if isNx is True:
        # Input was a NetworkX graph; convert the result back to NetworkX.
        KCoreGraph = cugraph_to_nx(KCoreGraph)
    return KCoreGraph
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/cores/__init__.py | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.cores.core_number import core_number
from cugraph.cores.k_core import k_core
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/linear_assignment/lap.pxd | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.structure.graph_primtypes cimport *
cdef extern from "cugraph/algorithms.hpp" namespace "cugraph":
    # Sparse Hungarian solver over a COO graph view, with an explicit
    # epsilon zero-tolerance.
    cdef weight_t hungarian[vertex_t,edge_t,weight_t](
        const handle_t &handle,
        const GraphCOOView[vertex_t,edge_t,weight_t] &graph,
        vertex_t num_workers,
        const vertex_t *workers,
        vertex_t *assignments,
        weight_t epsilon) except +

    # Overload that lets the C++ library pick its default epsilon.
    cdef weight_t hungarian[vertex_t,edge_t,weight_t](
        const handle_t &handle,
        const GraphCOOView[vertex_t,edge_t,weight_t] &graph,
        vertex_t num_workers,
        const vertex_t *workers,
        vertex_t *assignments) except +

cdef extern from "cugraph/algorithms.hpp":
    # Dense Hungarian solver over a row-major cost matrix, with an explicit
    # epsilon zero-tolerance.
    cdef weight_t dense_hungarian "cugraph::dense::hungarian" [vertex_t,weight_t](
        const handle_t &handle,
        const weight_t *costs,
        vertex_t num_rows,
        vertex_t num_columns,
        vertex_t *assignments,
        weight_t epsilon) except +

    # Overload that lets the C++ library pick its default epsilon.
    cdef weight_t dense_hungarian "cugraph::dense::hungarian" [vertex_t,weight_t](
        const handle_t &handle,
        const weight_t *costs,
        vertex_t num_rows,
        vertex_t num_columns,
        vertex_t *assignments) except +
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/linear_assignment/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources compiled into this package's extension module(s).
set(cython_sources lap_wrapper.pyx)
# Libraries the generated extension modules link against.
set(linked_libraries cugraph::cugraph)
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX linear_assignment_
  ASSOCIATED_TARGETS cugraph
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/linear_assignment/lap_wrapper.pyx | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.linear_assignment.lap cimport hungarian as c_hungarian
from cugraph.linear_assignment.lap cimport dense_hungarian as c_dense_hungarian
from cugraph.structure.graph_primtypes cimport *
from cugraph.structure import graph_primtypes_wrapper
from libc.stdint cimport uintptr_t
import cudf
import numpy as np
def sparse_hungarian(input_graph, workers, epsilon):
    """
    Run the sparse (COO) Hungarian assignment solver.

    Parameters
    ----------
    input_graph : cugraph.Graph
        Weighted graph; its edge list is materialized here if absent.
    workers : cudf.Series
        Vertex ids of the worker set; cast to int32 for the C++ solver.
    epsilon : float or None
        Zero tolerance passed to the solver; None selects 1e-6.

    Returns
    -------
    cost
        Total cost of the optimal assignment (matches the weight dtype).
    df : cudf.DataFrame
        Columns 'vertex' (worker ids) and 'assignment' (assigned job ids).
    """
    src = None
    dst = None
    weights = None
    local_workers = None

    cdef unique_ptr[handle_t] handle_ptr
    handle_ptr.reset(new handle_t())
    handle_ = handle_ptr.get();

    # The C++ solver consumes a COO view of the graph.
    if not input_graph.edgelist:
        input_graph.view_edge_list()

    if input_graph.edgelist.weights is None:
        raise Exception("hungarian algorithm requires weighted graph")

    src = input_graph.edgelist.edgelist_df['src']
    dst = input_graph.edgelist.edgelist_df['dst']
    weights = input_graph.edgelist.edgelist_df["weights"]

    # Vertex ids must be int32; weights may be float32 or float64.
    [src, dst] = graph_primtypes_wrapper.datatype_cast([src, dst], [np.int32])
    [weights] = graph_primtypes_wrapper.datatype_cast([weights], [np.float32, np.float64])
    [local_workers] = graph_primtypes_wrapper.datatype_cast([workers], [np.int32])

    num_verts = input_graph.number_of_vertices()
    num_edges = len(src)

    # Output frame: one row per worker, assignment filled by the solver.
    df = cudf.DataFrame()
    df['vertex'] = workers
    df['assignment'] = cudf.Series(np.zeros(len(workers), dtype=np.int32))

    # Fixed: compare to None with identity (`is`), not equality.
    if epsilon is None:
        epsilon = 1e-6

    cdef uintptr_t c_src = src.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = dst.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_workers = local_workers.__cuda_array_interface__['data'][0]

    cdef uintptr_t c_identifier = df['vertex'].__cuda_array_interface__['data'][0];
    cdef uintptr_t c_assignment = df['assignment'].__cuda_array_interface__['data'][0];

    cdef float c_epsilon_float = epsilon
    cdef double c_epsilon_double = epsilon

    cdef GraphCOOView[int,int,float] g_float
    cdef GraphCOOView[int,int,double] g_double

    # Dispatch on the weight dtype to the matching C++ instantiation.
    if weights.dtype == np.float32:
        g_float = GraphCOOView[int,int,float](<int*>c_src, <int*>c_dst, <float*>c_weights, num_verts, num_edges)
        cost = c_hungarian[int,int,float](handle_[0], g_float, len(workers), <int*>c_workers, <int*>c_assignment, c_epsilon_float)
    else:
        g_double = GraphCOOView[int,int,double](<int*>c_src, <int*>c_dst, <double*>c_weights, num_verts, num_edges)
        cost = c_hungarian[int,int,double](handle_[0], g_double, len(workers), <int*>c_workers, <int*>c_assignment, c_epsilon_double)

    return cost, df
def dense_hungarian(costs, num_rows, num_columns, epsilon):
    """
    Run the dense Hungarian assignment solver.

    Parameters
    ----------
    costs : cudf.Series
        Row-major flattened cost matrix (num_rows x num_columns).
    num_rows : int
        Number of rows (workers) in the matrix.
    num_columns : int
        Number of columns (tasks) in the matrix.
    epsilon : float or None
        Zero tolerance passed to the solver; None selects 1e-6.

    Returns
    -------
    cost
        Total cost of the optimal assignment (matches costs.dtype).
    assignment : cudf.Series
        assignment[i] is the task assigned to worker i.

    Raises
    ------
    TypeError
        If costs is not a cudf.Series or has an unsupported dtype.
    """
    if type(costs) is not cudf.Series:
        # Fixed: `raise("...")` raised a bare str, which itself produced
        # "TypeError: exceptions must derive from BaseException".
        raise TypeError("costs must be a cudf.Series")

    cdef unique_ptr[handle_t] handle_ptr
    handle_ptr.reset(new handle_t())
    handle_ = handle_ptr.get();

    assignment = cudf.Series(np.zeros(num_rows, dtype=np.int32))

    # Fixed: compare to None with identity (`is`), not equality.
    if epsilon is None:
        epsilon = 1e-6

    cdef uintptr_t c_costs = costs.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_assignment = assignment.__cuda_array_interface__['data'][0]
    cdef float c_epsilon_float = epsilon
    cdef double c_epsilon_double = epsilon

    if costs.dtype == np.float32:
        cost = c_dense_hungarian[int,float](handle_[0], <float*> c_costs, num_rows, num_columns, <int*> c_assignment, c_epsilon_float)
    elif costs.dtype == np.float64:
        cost = c_dense_hungarian[int,double](handle_[0], <double*> c_costs, num_rows, num_columns, <int*> c_assignment, c_epsilon_double)
    elif costs.dtype == np.int32:
        # NOTE(review): int32 device data is cast to double* here, which
        # looks like a pointer reinterpretation rather than a conversion —
        # confirm the C++ side expects this. TODO confirm.
        cost = c_dense_hungarian[int,double](handle_[0], <double*> c_costs, num_rows, num_columns, <int*> c_assignment)
    else:
        # Fixed: was `raise("unsported type: ", costs.dtype)` (raised a
        # tuple, and had a typo in the message).
        raise TypeError(f"unsupported type: {costs.dtype}")

    return cost, assignment
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/linear_assignment/__init__.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.linear_assignment.lap import hungarian, dense_hungarian
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/linear_assignment/lap.py | # Copyright (c) 2020-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
from cugraph.linear_assignment import lap_wrapper
def hungarian(G, workers, epsilon=None):
    """
    Execute the Hungarian algorithm against a symmetric, weighted,
    bipartite graph.

    The vertex set of a bipartite graph can be partitioned into two
    disjoint sets such that every edge joins the two sets. ``workers``
    names one of those sets; every other vertex (V - workers) is
    implicitly a job. Edge weights give the cost of assigning a job to a
    worker, and the algorithm finds the lowest-cost matching in which
    every assignable worker receives exactly one job.

    Parameters
    ----------
    G : cugraph.Graph
        cuGraph graph descriptor containing the connectivity as an edge
        list. Edge weights are required. If an edge list is not provided
        then it will be computed.

    workers : cudf.Series or cudf.DataFrame
        Vertex ids of the vertices in the workers set. In case of
        multi-column vertices, it should be a cudf.DataFrame. All
        vertices in G that are not in the workers set are implicitly
        assigned to the jobs set.

    epsilon : float/double (matching weight in graph), optional (default=None)
        Tolerance for treating a value as zero. Defaults (if not
        specified) to 1e-6 in the C++ code. Unused for integer weight
        types.

    Returns
    -------
    cost : matches costs.dtype
        The cost of the overall assignment

    df : cudf.DataFrame
        df['vertex'][i] gives the vertex id of the i'th vertex. Only
        vertices in the workers list are defined in this column.
        df['assignment'][i] gives the vertex id of the "job" assigned to
        the corresponding vertex.

    Examples
    --------
    >>> workers, G, costs = cugraph.utils.create_random_bipartite(5, 5,
    ...                                                           100, float)
    >>> cost, df = cugraph.hungarian(G, workers)

    """
    # Map external worker ids to internal ids when the graph is renumbered.
    local_workers = workers
    if G.renumbered:
        if isinstance(workers, cudf.DataFrame):
            local_workers = G.lookup_internal_vertex_id(workers, workers.columns)
        else:
            local_workers = G.lookup_internal_vertex_id(workers)

    cost, assignment_df = lap_wrapper.sparse_hungarian(G, local_workers, epsilon)

    # Translate the result back to the caller's vertex ids.
    if G.renumbered:
        assignment_df = G.unrenumber(assignment_df, "vertex")
    return cost, assignment_df
def dense_hungarian(costs, num_rows, num_columns, epsilon=None):
    """
    Execute the Hungarian algorithm against a dense bipartite
    graph representation.

    *NOTE*: This API is unstable and subject to change

    The Hungarian algorithm identifies the lowest cost matching of vertices
    such that all workers that can be assigned work are assigned exactly
    one job.

    Parameters
    ----------
    costs : cudf.Series
        A dense representation (row major order) of the bipartite
        graph. Each row represents a worker, each column represents
        a task, cost[i][j] represents the cost of worker i performing
        task j.

    num_rows : int
        Number of rows in the matrix

    num_columns : int
        Number of columns in the matrix

    epsilon : float or double (matching weight type in graph), optional (default=None)
        Used for determining when value is close enough to zero to consider 0.
        Defaults (if not specified) to 1e-6 in the C++ code. Unused for
        integer weight types.

    Returns
    -------
    cost : matches costs.dtype
        The cost of the overall assignment

    assignment : cudf.Series
        assignment[i] gives the vertex id of the task assigned to the
        worker i

    Examples
    --------
    >>> workers, G, costs = cugraph.utils.create_random_bipartite(5, 5,
    ...                                                           100, float)
    >>> costs_flattened = cudf.Series(costs.flatten())
    >>> cost, assignment = cugraph.dense_hungarian(costs_flattened, 5, 5)

    """
    # All validation (costs must be a cudf.Series, supported dtype) is
    # performed inside the Cython wrapper.
    return lap_wrapper.dense_hungarian(costs, num_rows, num_columns, epsilon)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tree/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources compiled into this package's extension module(s).
set(cython_sources minimum_spanning_tree_wrapper.pyx)
# Libraries the generated extension modules link against.
set(linked_libraries cugraph::cugraph)
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX tree_
  ASSOCIATED_TARGETS cugraph
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tree/minimum_spanning_tree_wrapper.pyx | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.tree.minimum_spanning_tree cimport minimum_spanning_tree as c_mst
from cugraph.structure.graph_primtypes cimport *
from cugraph.structure import graph_primtypes_wrapper
from libc.stdint cimport uintptr_t
import cudf
# FIXME: these are transitive dependencies and are not currently listed in the
# conda recipe. Either make them optional or add them to the conda recipe.
import numpy as np
import cupy as cp
def mst_float(num_verts, num_edges, offsets, indices, weights):
    # Run the C++ MST solver on a float32-weighted CSR view and return the
    # resulting tree edges as a COO DataFrame.
    cdef unique_ptr[handle_t] handle_ptr
    handle_ptr.reset(new handle_t())
    handle_ = handle_ptr.get();

    # Raw device pointers extracted via __cuda_array_interface__.
    cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0]

    # Non-owning CSR view over the caller's device arrays.
    cdef GraphCSRView[int,int,float] graph_float
    graph_float = GraphCSRView[int,int,float](<int*>c_offsets, <int*>c_indices, <float*>c_weights, num_verts, num_edges)

    return coo_to_df(move(c_mst[int,int,float](handle_[0], graph_float)))
def mst_double(num_verts, num_edges, offsets, indices, weights):
    # Run the C++ MST solver on a float64-weighted CSR view and return the
    # resulting tree edges as a COO DataFrame.
    cdef unique_ptr[handle_t] handle_ptr
    handle_ptr.reset(new handle_t())
    handle_ = handle_ptr.get();

    # Raw device pointers extracted via __cuda_array_interface__.
    cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = weights.__cuda_array_interface__['data'][0]

    # Non-owning CSR view over the caller's device arrays.
    cdef GraphCSRView[int,int,double] graph_double
    graph_double = GraphCSRView[int,int,double](<int*>c_offsets, <int*>c_indices, <double*>c_weights, num_verts, num_edges)

    return coo_to_df(move(c_mst[int,int,double](handle_[0], graph_double)))
def minimum_spanning_tree(input_graph):
    """
    Compute the minimum spanning tree of input_graph and return its edges
    as a COO DataFrame, dispatching to the float32 or float64 solver based
    on the graph's weight type.
    """
    if not input_graph.adjlist:
        input_graph.view_adj_list()

    adjlist = input_graph.adjlist
    [csr_offsets, csr_indices] = graph_primtypes_wrapper.datatype_cast(
        [adjlist.offsets, adjlist.indices], [np.int32]
    )
    n_vertices = input_graph.number_of_vertices()
    n_edges = input_graph.number_of_edges(directed_edges=True)

    if adjlist.weights is None:
        # Unweighted graph: substitute unit float32 weights.
        edge_weights = cudf.Series(cp.full(n_edges, 1.0, dtype=np.float32))
    else:
        [edge_weights] = graph_primtypes_wrapper.datatype_cast(
            [adjlist.weights], [np.float32, np.float64]
        )

    solver = (
        mst_float
        if graph_primtypes_wrapper.weight_type(input_graph) == np.float32
        else mst_double
    )
    return solver(n_vertices, n_edges, csr_offsets, csr_indices, edge_weights)
def maximum_spanning_tree(input_graph):
    """
    Entry point used by the maximum-spanning-tree caller.

    This body was a byte-for-byte duplicate of minimum_spanning_tree: the
    weight negation that turns a minimum into a maximum spanning tree is
    performed by the Python-level caller before invoking this wrapper, so
    we simply delegate.
    """
    return minimum_spanning_tree(input_graph)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tree/minimum_spanning_tree.pxd | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.structure.graph_primtypes cimport *
cdef extern from "cugraph/algorithms.hpp" namespace "cugraph":
    # Minimum spanning tree/forest over a CSR graph view; returns the tree
    # edges as a COO graph owned by a unique_ptr.
    cdef unique_ptr[GraphCOO[VT,ET,WT]] minimum_spanning_tree[VT,ET,WT](const handle_t &handle,
                                                                       const GraphCSRView[VT,ET,WT] &graph) except +
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tree/minimum_spanning_tree.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.tree import minimum_spanning_tree_wrapper
from cugraph.structure.graph_classes import Graph
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
cugraph_to_nx,
)
def _minimum_spanning_tree_subgraph(G):
    """Return a cugraph.Graph containing the MST/MSF edges of undirected G."""
    if G.is_directed():
        raise ValueError("input graph must be undirected")

    edge_df = minimum_spanning_tree_wrapper.minimum_spanning_tree(G)

    # Translate internal ids back to the caller's vertex ids.
    if G.renumbered:
        for endpoint in ("src", "dst"):
            edge_df = G.unrenumber(edge_df, endpoint)

    subgraph = Graph()
    subgraph.from_cudf_edgelist(
        edge_df, source="src", destination="dst", edge_attr="weight"
    )
    return subgraph
def _maximum_spanning_tree_subgraph(G):
    """
    Return a cugraph.Graph containing the maximum spanning tree/forest of
    undirected G.

    Implemented by negating the adjacency-list weights in place, running
    the minimum-spanning-tree solver, then negating them back.
    """
    mst_subgraph = Graph()
    if G.is_directed():
        raise ValueError("input graph must be undirected")
    if not G.adjlist:
        G.view_adj_list()

    has_weights = G.adjlist.weights is not None
    if has_weights:
        G.adjlist.weights = G.adjlist.weights.mul(-1)
    try:
        mst_df = minimum_spanning_tree_wrapper.minimum_spanning_tree(G)
    finally:
        # Always restore the caller's original weights — the original code
        # left them negated if the solver raised.
        if has_weights:
            G.adjlist.weights = G.adjlist.weights.mul(-1)
    if has_weights:
        # Undo the negation on the result so reported weights are positive.
        mst_df["weight"] = mst_df["weight"].mul(-1)

    if G.renumbered:
        mst_df = G.unrenumber(mst_df, "src")
        mst_df = G.unrenumber(mst_df, "dst")

    mst_subgraph.from_cudf_edgelist(
        mst_df, source="src", destination="dst", edge_attr="weight"
    )
    return mst_subgraph
def minimum_spanning_tree(G, weight=None, algorithm="boruvka", ignore_nan=False):
    """
    Return a minimum spanning tree (MST) or forest (MSF) of an undirected
    graph.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information.

    weight : string
        Defaults to the weights in the graph; if the graph edges do not
        have a weight attribute a default weight of 1 will be used.

    algorithm : string
        Defaults to 'boruvka'. The parallel algorithm to use when finding
        a minimum spanning tree.

    ignore_nan : bool
        Defaults to False.

    Returns
    -------
    G_mst : cuGraph.Graph or networkx.Graph
        A graph descriptor with a minimum spanning tree or forest.
        The networkx graph will not have all attributes copied over.

    Examples
    --------
    >>> from cugraph.datasets import netscience
    >>> G = netscience.get_graph(download=True)
    >>> G_mst = cugraph.minimum_spanning_tree(G)

    """
    G, input_was_nx = ensure_cugraph_obj_for_nx(G)
    mst = _minimum_spanning_tree_subgraph(G)
    # NetworkX callers get a NetworkX graph back.
    return cugraph_to_nx(mst) if input_was_nx else mst
def maximum_spanning_tree(G, weight=None, algorithm="boruvka", ignore_nan=False):
    """
    Return a maximum spanning tree (MST) or forest (MSF) of an undirected
    graph. Also computes the adjacency list if G does not have one.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information.

    weight : string
        Defaults to the weights in the graph; if the graph edges do not
        have a weight attribute a default weight of 1 will be used.

    algorithm : string
        Defaults to 'boruvka'. The parallel algorithm to use when finding
        a maximum spanning tree.

    ignore_nan : bool
        Defaults to False.

    Returns
    -------
    G_mst : cuGraph.Graph or networkx.Graph
        A graph descriptor with a maximum spanning tree or forest.
        The networkx graph will not have all attributes copied over.

    Examples
    --------
    >>> from cugraph.datasets import netscience
    >>> G = netscience.get_graph(download=True)
    >>> G_mst = cugraph.maximum_spanning_tree(G)

    """
    G, input_was_nx = ensure_cugraph_obj_for_nx(G)
    mst = _maximum_spanning_tree_subgraph(G)
    # NetworkX callers get a NetworkX graph back.
    return cugraph_to_nx(mst) if input_was_nx else mst
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/tree/__init__.py | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.tree.minimum_spanning_tree import (
minimum_spanning_tree,
maximum_spanning_tree,
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/centrality/degree_centrality.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
def degree_centrality(G, normalized=True):
    """
    Computes the degree centrality of each vertex of the input graph.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information. The graph can
        contain either directed or undirected edges.

    normalized : bool, optional, default=True
        If True, normalize the resulting degree centrality values by
        ``n - 1`` where ``n`` is the number of nodes in G.

    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding degree centrality values.

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['degree_centrality'] : cudf.Series
            Contains the degree centrality of vertices

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> dc = cugraph.degree_centrality(G)

    """
    G, isNx = ensure_cugraph_obj_for_nx(G)

    df = G.degree()
    df.rename(columns={"degree": "degree_centrality"}, inplace=True)

    if normalized:
        # Normalize by the maximum possible degree, n - 1.
        df["degree_centrality"] /= G.number_of_nodes() - 1

    if isNx:
        # Renamed the result variable: the original bound it to `dict`,
        # shadowing the builtin.
        score_dict = df_score_to_dictionary(df, "degree_centrality")
        return score_dict
    return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/centrality/eigenvector_centrality.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pylibcugraph import (
eigenvector_centrality as pylib_eigen,
ResourceHandle,
)
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
import cudf
import warnings
def eigenvector_centrality(G, max_iter=100, tol=1.0e-6):
    """
    Compute the eigenvector centrality for a graph G.

    Eigenvector centrality computes the centrality for a node based on the
    centrality of its neighbors. The eigenvector centrality for node i is the
    i-th element of the vector x defined by the eigenvector equation.

    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information. The graph can
        contain either directed or undirected edges.

    max_iter : int, optional (default=100)
        The maximum number of iterations before an answer is returned. This can
        be used to limit the execution time and do an early exit before the
        solver reaches the convergence tolerance.

    tol : float, optional (default=1e-6)
        Set the tolerance of the approximation; this parameter should be a
        small magnitude value. The lower the tolerance the better the
        approximation. If this value is 0.0f, cuGraph will use the default
        value which is 1.0e-6. Setting too small a tolerance can lead to
        non-convergence due to numerical roundoff. Usually values between
        1e-2 and 1e-6 are acceptable.

    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding eigenvector centrality values.

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['eigenvector_centrality'] : cudf.Series
            Contains the eigenvector centrality of vertices

    Raises
    ------
    ValueError
        If max_iter is not a positive integer or tol is not a positive float.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> ec = cugraph.eigenvector_centrality(G)

    """
    if (not isinstance(max_iter, int)) or max_iter <= 0:
        raise ValueError(f"'max_iter' must be a positive integer" f", got: {max_iter}")
    if (not isinstance(tol, float)) or (tol <= 0.0):
        raise ValueError(f"'tol' must be a positive float, got: {tol}")

    G, isNx = ensure_cugraph_obj_for_nx(G, store_transposed=True)
    if G.store_transposed is False:
        warning_msg = (
            "Eigenvector centrality expects the 'store_transposed' "
            "flag to be set to 'True' for optimal performance "
            "during the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)

    vertices, values = pylib_eigen(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        epsilon=tol,
        max_iterations=max_iter,
        do_expensive_check=False,
    )

    df = cudf.DataFrame()
    df["vertex"] = cudf.Series(vertices)
    df["eigenvector_centrality"] = cudf.Series(values)

    if G.renumbered:
        df = G.unrenumber(df, "vertex")

    if isNx:
        # Renamed the result variable: the original bound it to `dict`,
        # shadowing the builtin.
        score_dict = df_score_to_dictionary(df, "eigenvector_centrality")
        return score_dict
    return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/centrality/betweenness_centrality.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pylibcugraph import (
betweenness_centrality as pylibcugraph_betweenness_centrality,
edge_betweenness_centrality as pylibcugraph_edge_betweenness_centrality,
ResourceHandle,
)
from cugraph.utilities import (
df_edge_score_to_dictionary,
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
import cudf
import warnings
import numpy as np
from typing import Union
def betweenness_centrality(
    G,
    k: Union[int, list, cudf.Series, cudf.DataFrame] = None,
    normalized: bool = True,
    weight: cudf.DataFrame = None,
    endpoints: bool = False,
    seed: int = None,
    random_state: int = None,
    result_dtype: Union[np.float32, np.float64] = np.float64,
) -> Union[cudf.DataFrame, dict]:
    """
    Compute the betweenness centrality for all vertices of the graph G.
    Betweenness centrality is a measure of the number of shortest paths that
    pass through a vertex. A vertex with a high betweenness centrality score
    has more paths passing through it and is therefore believed to be more
    important.
    To improve performance, rather than doing an all-pair shortest path,
    a sample of k starting vertices can be used.
    CuGraph does not currently support 'weight' parameters.
    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        The graph can be either directed (Graph(directed=True)) or undirected.
        The current implementation uses a parallel variation of the Brandes
        Algorithm (2001) to compute exact or approximate betweenness.
        If weights are provided in the edgelist, they will not be used.
    k : int, list or cudf object or None, optional (default=None)
        If k is not None, use k node samples to estimate betweenness. Higher
        values give better approximation. If k is either a list, a cudf DataFrame,
        or a dask_cudf DataFrame, then its contents are assumed to be vertex
        identifiers to be used for estimation. If k is None (the default), all the
        vertices are used to estimate betweenness. Vertices obtained through
        sampling or defined as a list will be used as sources for traversals inside
        the algorithm.
    normalized : bool, optional (default=True)
        If true, the betweenness values are normalized by
        __2 / ((n - 1) * (n - 2))__ for undirected Graphs, and
        __1 / ((n - 1) * (n - 2))__ for directed Graphs
        where n is the number of nodes in G.
        Normalization will ensure that values are in [0, 1],
        this normalization scales for the highest possible value where one
        node is crossed by every single shortest path.
    weight : cudf.DataFrame, optional (default=None)
        Specifies the weights to be used for each edge.
        Should contain a mapping between
        edges and weights.
        (Not Supported): if weights are provided at the Graph creation,
        they will not be used.
    endpoints : bool, optional (default=False)
        If true, include the endpoints in the shortest path counts.
    seed : int, optional (default=None)
        if k is specified and k is an integer, use seed to initialize
        the random number generator.
        Using None defaults to a hash of process id, time, and hostname
        If k is either None or list: seed parameter is ignored.
        This parameter is here for backwards-compatibility and identical
        to 'random_state'.
    random_state : int, optional (default=None)
        if k is specified and k is an integer, use random_state to initialize
        the random number generator.
        Using None defaults to a hash of process id, time, and hostname
        If k is either None or list: random_state parameter is ignored.
    result_dtype : np.float32 or np.float64, optional, default=np.float64
        Indicate the data type of the betweenness centrality scores.
    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding betweenness centrality values.
        Please note that the resulting 'vertex' column might not be
        in ascending order. The Dictionary contains the same two columns
        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['betweenness_centrality'] : cudf.Series
            Contains the betweenness centrality of vertices
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> bc = cugraph.betweenness_centrality(G)
    """
    if seed is not None:
        warning_msg = (
            "This parameter is deprecated and will be removed "
            "in the next release. Use 'random_state' instead."
        )
        warnings.warn(warning_msg, UserWarning)
        # 'seed' is documented as identical to 'random_state'; honor the
        # deprecated parameter so it still controls the sampling RNG
        # (previously it was warned about but silently ignored).
        if random_state is None:
            random_state = seed
    G, isNx = ensure_cugraph_obj_for_nx(G)
    if weight is not None:
        raise NotImplementedError(
            "weighted implementation of betweenness "
            "centrality not currently supported"
        )
    if G.store_transposed is True:
        warning_msg = (
            "Betweenness centrality expects the 'store_transposed' flag "
            "to be set to 'False' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)
    # FIXME: Should we now remove this parameter?
    if result_dtype not in [np.float32, np.float64]:
        raise TypeError("result type can only be np.float32 or np.float64")
    else:
        # NOTE: 'result_dtype' has a default value, so this deprecation
        # warning is emitted on every call, not only when a caller passes
        # the argument explicitly.
        warning_msg = (
            "This parameter is deprecated and will be removed "
            "in the next release."
        )
        warnings.warn(warning_msg, PendingDeprecationWarning)
    if not isinstance(k, (cudf.DataFrame, cudf.Series)):
        if isinstance(k, list):
            # Match the graph's vertex dtype so the internal-id lookup below
            # succeeds.
            vertex_dtype = G.edgelist.edgelist_df.dtypes[0]
            k = cudf.Series(k, dtype=vertex_dtype)
    if isinstance(k, (cudf.DataFrame, cudf.Series)):
        if G.renumbered:
            k = G.lookup_internal_vertex_id(k)
    vertices, values = pylibcugraph_betweenness_centrality(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        k=k,
        random_state=random_state,
        normalized=normalized,
        include_endpoints=endpoints,
        do_expensive_check=False,
    )
    vertices = cudf.Series(vertices)
    values = cudf.Series(values)
    df = cudf.DataFrame()
    df["vertex"] = vertices
    df["betweenness_centrality"] = values
    if G.renumbered:
        # Map internal vertex ids back to the user's original ids.
        df = G.unrenumber(df, "vertex")
    if df["betweenness_centrality"].dtype != result_dtype:
        df["betweenness_centrality"] = df["betweenness_centrality"].astype(result_dtype)
    if isNx is True:
        # NetworkX callers get a plain {vertex: score} dictionary.
        return df_score_to_dictionary(df, "betweenness_centrality")
    else:
        return df
def edge_betweenness_centrality(
    G,
    k: Union[int, list, cudf.Series, cudf.DataFrame] = None,
    normalized: bool = True,
    weight: cudf.DataFrame = None,
    seed: int = None,
    result_dtype: Union[np.float32, np.float64] = np.float64,
) -> Union[cudf.DataFrame, dict]:
    """
    Compute the edge betweenness centrality for all edges of the graph G.
    Betweenness centrality is a measure of the number of shortest paths
    that pass over an edge. An edge with a high betweenness centrality
    score has more paths passing over it and is therefore believed to be
    more important.
    To improve performance, rather than doing an all-pair shortest path,
    a sample of k starting vertices can be used.
    CuGraph does not currently support the 'weight' parameter.
    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        The graph can be either directed (Graph(directed=True)) or undirected.
        The current implementation uses BFS traversals. Use weight parameter
        if weights need to be considered (currently not supported).
    k : int or list or None, optional (default=None)
        If k is not None, use k node samples to estimate betweenness. Higher
        values give better approximation. If k is either a list, a cudf DataFrame,
        or a dask_cudf DataFrame, then its contents are assumed to be vertex
        identifiers to be used for estimation. If k is None (the default), all the
        vertices are used to estimate betweenness. Vertices obtained through
        sampling or defined as a list will be used as sources for traversals inside
        the algorithm.
    normalized : bool, optional (default=True)
        If true, the betweenness values are normalized by
        __2 / (n * (n - 1))__ for undirected Graphs, and
        __1 / (n * (n - 1))__ for directed Graphs
        where n is the number of nodes in G.
        Normalization will ensure that values are in [0, 1],
        this normalization scales for the highest possible value where one
        edge is crossed by every single shortest path.
    weight : cudf.DataFrame, optional (default=None)
        Specifies the weights to be used for each edge.
        Should contain a mapping between
        edges and weights.
        (Not Supported)
    seed : optional (default=None)
        if k is specified and k is an integer, use seed to initialize the
        random number generator.
        Using None as seed relies on random.seed() behavior: using current
        system time
        If k is either None or list: seed parameter is ignored
    result_dtype : np.float32 or np.float64, optional (default=np.float64)
        Indicate the data type of the betweenness centrality scores
        Using double automatically switch implementation to "default"
    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing three cudf.Series of size E: the vertex
        identifiers of the sources, the vertex identifiers of the destinations
        and the corresponding betweenness centrality values.
        Please note that the resulting 'src', 'dst' columns might not be
        in ascending order.
        df['src'] : cudf.Series
            Contains the vertex identifiers of the source of each edge
        df['dst'] : cudf.Series
            Contains the vertex identifiers of the destination of each edge
        df['betweenness_centrality'] : cudf.Series
            Contains the betweenness centrality of edges
        df["edge_id"] : cudf.Series
            Contains the edge ids of edges if present.
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> bc = cugraph.edge_betweenness_centrality(G)
    """
    if weight is not None:
        raise NotImplementedError(
            "weighted implementation of edge betweenness "
            "centrality not currently supported"
        )
    if result_dtype not in [np.float32, np.float64]:
        raise TypeError("result type can only be np.float32 or np.float64")
    G, isNx = ensure_cugraph_obj_for_nx(G)
    # A plain python list of seed vertices must be converted to a cudf.Series
    # with the graph's vertex dtype before the internal-id lookup.
    if not isinstance(k, (cudf.DataFrame, cudf.Series)):
        if isinstance(k, list):
            vertex_dtype = G.edgelist.edgelist_df.dtypes[0]
            k = cudf.Series(k, dtype=vertex_dtype)
    if isinstance(k, (cudf.DataFrame, cudf.Series)):
        if G.renumbered:
            k = G.lookup_internal_vertex_id(k)
    # FIXME: src, dst and edge_ids need to be of the same type which should not
    # be the case
    (
        src_vertices,
        dst_vertices,
        values,
        edge_ids,
    ) = pylibcugraph_edge_betweenness_centrality(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        k=k,
        random_state=seed,
        normalized=normalized,
        do_expensive_check=False,
    )
    df = cudf.DataFrame()
    df["src"] = src_vertices
    df["dst"] = dst_vertices
    df["betweenness_centrality"] = values
    if edge_ids is not None:
        df["edge_id"] = edge_ids
    if G.renumbered:
        # Map internal vertex ids back to the user's original ids.
        df = G.unrenumber(df, "src")
        df = G.unrenumber(df, "dst")
    if df["betweenness_centrality"].dtype != result_dtype:
        df["betweenness_centrality"] = df["betweenness_centrality"].astype(result_dtype)
    if G.is_directed() is False:
        # select the lower triangle of the df based on src/dst vertex value
        lower_triangle = df["src"] >= df["dst"]
        # swap the src and dst vertices for the lower triangle only. Because
        # this is a symmetrized graph, this operation results in a df with
        # multiple (duplicate) src/dst entries.
        df["src"][lower_triangle], df["dst"][lower_triangle] = (
            df["dst"][lower_triangle],
            df["src"][lower_triangle],
        )
        # overwrite the df with the sum of the values for all alike src/dst
        # vertex pairs, resulting in half the edges of the original df from the
        # symmetrized graph.
        df = df.groupby(by=["src", "dst"]).sum().reset_index()
    if isNx is True:
        return df_edge_score_to_dictionary(df, "betweenness_centrality")
    else:
        return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/centrality/__init__.py | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.centrality.katz_centrality import katz_centrality
from cugraph.centrality.betweenness_centrality import (
betweenness_centrality,
edge_betweenness_centrality,
)
from cugraph.centrality.degree_centrality import degree_centrality
from cugraph.centrality.eigenvector_centrality import eigenvector_centrality
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/centrality/katz_centrality.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pylibcugraph import katz_centrality as pylibcugraph_katz, ResourceHandle
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
import cudf
import warnings
def katz_centrality(
    G, alpha=None, beta=1.0, max_iter=100, tol=1.0e-6, nstart=None, normalized=True
):
    """
    Compute the Katz centrality for the nodes of the graph G. This
    implementation is based on a relaxed version of Katz defined by Foster
    with a reduced computational complexity of O(n+m)
    On a directed graph, cuGraph computes the out-edge Katz centrality score.
    This is opposite of NetworkX which compute the in-edge Katz centrality
    score by default. You can flip the NetworkX edges, using G.reverse,
    so that the results match cuGraph.
    References
    ----------
    Foster, K.C., Muth, S.Q., Potterat, J.J. et al.
    Computational & Mathematical Organization Theory (2001) 7: 275.
    https://doi.org/10.1023/A:1013470632383
    Katz, L. (1953). A new status index derived from sociometric analysis.
    Psychometrika, 18(1), 39-43.
    Parameters
    ----------
    G : cuGraph.Graph or networkx.Graph
        cuGraph graph descriptor with connectivity information. The graph can
        contain either directed or undirected edges.
    alpha : float, optional (default=None)
        Attenuation factor defaulted to None. If alpha is not specified then
        it is internally calculated as 1/(degree_max) where degree_max is the
        maximum out degree.
        NOTE:
            The maximum acceptable value of alpha for convergence
            alpha_max = 1/(lambda_max) where lambda_max is the largest
            eigenvalue of the graph.
            Since lambda_max is always lesser than or equal to degree_max for a
            graph, alpha_max will always be greater than or equal to
            (1/degree_max). Therefore, setting alpha to (1/degree_max) will
            guarantee that it will never exceed alpha_max thus in turn
            fulfilling the requirement for convergence.
    beta : float, optional (default=1.0)
        Weight scalar added to each vertex's new Katz Centrality score in every
        iteration.
    max_iter : int, optional (default=100)
        The maximum number of iterations before an answer is returned. This can
        be used to limit the execution time and do an early exit before the
        solver reaches the convergence tolerance.
    tol : float, optional (default=1.0e-6)
        Set the tolerance the approximation, this parameter should be a small
        magnitude value.
        The lower the tolerance the better the approximation. If this value is
        0.0f, cuGraph will use the default value which is 1.0e-6.
        Setting too small a tolerance can lead to non-convergence due to
        numerical roundoff. Usually values between 1e-2 and 1e-6 are
        acceptable.
    nstart : cudf.Dataframe, optional (default=None)
        GPU Dataframe containing the initial guess for katz centrality.
        nstart['vertex'] : cudf.Series
            Contains the vertex identifiers
        nstart['values'] : cudf.Series
            Contains the katz centrality values of vertices
    normalized : not supported
        If True normalize the resulting katz centrality values
    Returns
    -------
    df : cudf.DataFrame or Dictionary if using NetworkX
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding katz centrality values.
        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['katz_centrality'] : cudf.Series
            Contains the katz centrality of vertices
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> kc = cugraph.katz_centrality(G)
    """
    G, isNx = ensure_cugraph_obj_for_nx(G, store_transposed=True)
    if G.store_transposed is False:
        warning_msg = (
            "Katz centrality expects the 'store_transposed' flag "
            "to be set to 'True' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)
    if alpha is None:
        # Default attenuation factor: 1/degree_max never exceeds
        # 1/lambda_max, which guarantees convergence (see docstring NOTE).
        degree_max = G.degree()["degree"].max()
        alpha = 1 / degree_max
    # Validate each parameter independently (previously the beta check was
    # chained to the alpha check via 'elif', and the alpha check carried a
    # redundant 'is not None' test that was always true at this point).
    if alpha <= 0.0:
        raise ValueError(f"'alpha' must be a positive float or None, got: {alpha}")
    if (not isinstance(beta, float)) or (beta <= 0.0):
        raise ValueError(f"'beta' must be a positive float or None, got: {beta}")
    if (not isinstance(max_iter, int)) or (max_iter <= 0):
        raise ValueError(f"'max_iter' must be a positive integer, got: {max_iter}")
    if (not isinstance(tol, float)) or (tol <= 0.0):
        raise ValueError(f"'tol' must be a positive float, got: {tol}")
    if nstart is not None:
        if G.renumbered is True:
            if len(G.renumber_map.implementation.col_names) > 1:
                # Multi-column vertices: all but the trailing values column
                # identify the vertex.
                cols = nstart.columns[:-1].to_list()
            else:
                cols = "vertex"
            nstart = G.add_internal_vertex_id(nstart, "vertex", cols)
            # Pass only the values column to the C++ layer.
            nstart = nstart[nstart.columns[0]]
    vertices, values = pylibcugraph_katz(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        betas=nstart,
        alpha=alpha,
        beta=beta,
        epsilon=tol,
        max_iterations=max_iter,
        do_expensive_check=False,
    )
    vertices = cudf.Series(vertices)
    values = cudf.Series(values)
    df = cudf.DataFrame()
    df["vertex"] = vertices
    df["katz_centrality"] = values
    if G.renumbered:
        # Map internal vertex ids back to the user's original ids.
        df = G.unrenumber(df, "vertex")
    if isNx is True:
        # NetworkX callers get a plain {vertex: score} dictionary.
        return df_score_to_dictionary(df, "katz_centrality")
    else:
        return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/traversal/bfs.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import dask_cudf
from pylibcugraph import ResourceHandle
from pylibcugraph import bfs as pylibcugraph_bfs
from cugraph.structure.graph_classes import Graph
from cugraph.utilities import (
ensure_cugraph_obj,
is_matrix_type,
is_cp_matrix_type,
is_nx_graph_type,
cupy_package as cp,
)
def _ensure_args(G, start, i_start, directed):
"""
Ensures the args passed in are usable for the API api_name and returns the
args with proper defaults if not specified, or raises TypeError or
ValueError if incorrectly specified.
"""
# checks common to all input types
if (start is not None) and (i_start is not None):
raise TypeError("cannot specify both 'start' and 'i_start'")
if (start is None) and (i_start is None):
raise TypeError("must specify 'start' or 'i_start', but not both")
start = start if start is not None else i_start
G_type = type(G)
# Check for Graph-type inputs
if G_type is Graph or is_nx_graph_type(G_type):
if directed is not None:
raise TypeError("'directed' cannot be specified for a " "Graph-type input")
# ensure start vertex is valid
invalid_vertex_err = ValueError("A provided vertex was not valid")
if is_nx_graph_type(G_type):
if start not in G:
raise invalid_vertex_err
else:
if not isinstance(start, cudf.DataFrame):
if not isinstance(start, dask_cudf.DataFrame):
vertex_dtype = G.nodes().dtype
start = cudf.DataFrame(
{"starts": cudf.Series(start, dtype=vertex_dtype)}
)
if G.is_renumbered():
validlen = len(
G.renumber_map.to_internal_vertex_id(start, start.columns).dropna()
)
if validlen < len(start):
raise invalid_vertex_err
else:
el = G.edgelist.edgelist_df[["src", "dst"]]
col = start.columns[0]
null_l = (
el.merge(start[col].rename("src"), on="src", how="right")
.dst.isnull()
.sum()
)
null_r = (
el.merge(start[col].rename("dst"), on="dst", how="right")
.src.isnull()
.sum()
)
if null_l + null_r > 0:
raise invalid_vertex_err
if directed is None:
directed = True
return (start, directed)
def _convert_df_to_output_type(df, input_type):
    """
    Convert a cudf.DataFrame result into the return type matching the
    caller's input graph type: the DataFrame itself for cugraph Graphs, a
    pandas DataFrame for NetworkX graphs, or a (distance, predecessor)
    tuple of ndarrays for CuPy/SciPy sparse-matrix inputs.
    """
    if input_type is Graph:
        return df
    if is_nx_graph_type(input_type):
        return df.to_pandas()
    if is_matrix_type(input_type):
        # Matrix inputs expect the 2-tuple (distances, predecessors),
        # ordered by vertex id.
        ordered = df.sort_values("vertex")
        dist_col = ordered["distance"]
        pred_col = ordered["predecessor"]
        if is_cp_matrix_type(input_type):
            return (
                cp.from_dlpack(dist_col.to_dlpack()),
                cp.from_dlpack(pred_col.to_dlpack()),
            )
        return (dist_col.to_numpy(), pred_col.to_numpy())
    raise TypeError(f"input type {input_type} is not a supported type.")
def bfs(
    G,
    start=None,
    depth_limit=None,
    i_start=None,
    directed=None,
    return_predecessors=True,
):
    """
    Find the distances and predecessors for a breadth first traversal of a
    graph. Unlike SSSP, BFS supports unweighted graphs.
    Parameters
    ----------
    G : cugraph.Graph, networkx.Graph, CuPy or SciPy sparse matrix
        Graph or matrix object, which should contain the connectivity
        information. Edge weights, if present, should be single or double
        precision floating point values.
    start : Integer or list, optional (default=None)
        The id of the graph vertex from which the traversal begins, or
        if a list, the vertex from which the traversal begins in each
        component of the graph.  Only one vertex per connected
        component of the graph is allowed.
    depth_limit : Integer or None, optional (default=None)
        Limit the depth of the search
    i_start : Integer, optional (default=None)
        Identical to start, added for API compatibility. Only start or i_start
        can be set, not both.
    directed : bool, optional (default=None)
        NOTE
            For non-Graph-type (eg. sparse matrix) values of G only. Raises
            TypeError if used with a Graph object.
        If True, then convert the input matrix to a directed cugraph.Graph,
        otherwise an undirected cugraph.Graph object will be used.
    return_predecessors : bool, optional (default=True)
        Whether to return the predecessors for each vertex (returns -1
        for each vertex otherwise)
    Returns
    -------
    Return value type is based on the input type.  If G is a cugraph.Graph,
    returns:
       cudf.DataFrame
          df['vertex'] vertex IDs
          df['distance'] path distance for each vertex from the starting vertex
          df['predecessor'] for each i'th position in the column, the vertex ID
          immediately preceding the vertex at position i in the 'vertex' column
    If G is a networkx.Graph, returns:
       pandas.DataFrame with contents equivalent to the cudf.DataFrame
       described above.
    If G is a CuPy or SciPy matrix, returns:
       a 2-tuple of CuPy ndarrays (if CuPy matrix input) or Numpy ndarrays (if
       SciPy matrix input) representing:
       distance: cupy or numpy ndarray
          ndarray of shortest distances between source and vertex.
       predecessor: cupy or numpy ndarray
          ndarray of predecessors of a vertex on the path from source, which
          can be used to reconstruct the shortest paths.
       ...or if return_sp_counter is True, returns a 3-tuple with the above two
       arrays plus:
       sp_counter: cupy or numpy ndarray
          ndarray of number of shortest paths leading to each vertex.
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> df = cugraph.bfs(G, 0)
    """
    # Validate start/i_start/directed and normalize 'start'.
    (start, directed) = _ensure_args(G, start, i_start, directed)
    # FIXME: allow nx_weight_attr to be specified
    (G, input_type) = ensure_cugraph_obj(
        G, nx_weight_attr="weight", matrix_graph_type=Graph(directed=directed)
    )
    # The BFS C++ extension assumes the start vertex is a cudf.Series object,
    # and operates on internal vertex IDs if renumbered.
    is_dataframe = isinstance(start, cudf.DataFrame) or isinstance(
        start, dask_cudf.DataFrame
    )
    if G.renumbered is True:
        # Map the user-supplied vertex ids to the graph's internal ids.
        if is_dataframe:
            start = G.lookup_internal_vertex_id(start, start.columns)
        else:
            start = G.lookup_internal_vertex_id(cudf.Series(start))
    else:
        if is_dataframe:
            # Use the single vertex column as the source Series.
            start = start[start.columns[0]]
        else:
            # Scalar/list input: build a Series matching the vertex dtype.
            vertex_dtype = G.nodes().dtype
            start = cudf.Series(start, dtype=vertex_dtype)
    distances, predecessors, vertices = pylibcugraph_bfs(
        handle=ResourceHandle(),
        graph=G._plc_graph,
        sources=start,
        direction_optimizing=False,
        depth_limit=depth_limit if depth_limit is not None else -1,
        compute_predecessors=return_predecessors,
        do_expensive_check=False,
    )
    result_df = cudf.DataFrame(
        {
            "vertex": cudf.Series(vertices),
            "distance": cudf.Series(distances),
            "predecessor": cudf.Series(predecessors),
        }
    )
    if G.renumbered:
        # Map internal ids back to the user's original ids; entries with no
        # predecessor come back null, so restore the -1 sentinel afterwards.
        result_df = G.unrenumber(result_df, "vertex")
        result_df = G.unrenumber(result_df, "predecessor")
        result_df.fillna(-1, inplace=True)
    return _convert_df_to_output_type(result_df, input_type)
def bfs_edges(G, source, reverse=False, depth_limit=None, sort_neighbors=None):
    """
    Find the distances and predecessors for a breadth first traversal of a
    graph, starting from *source*.
    This is a thin convenience wrapper around :func:`bfs`; see that function
    for the full description of the return value for each input graph type
    (cugraph.Graph, networkx.Graph, or CuPy/SciPy sparse matrix).
    Parameters
    ----------
    G : cugraph.Graph, networkx.Graph, CuPy or SciPy sparse matrix
        Graph or matrix object containing the connectivity information. Edge
        weights, if present, should be single or double precision floats.
    source : Integer
        The starting vertex index
    reverse : boolean, optional (default=False)
        If a directed graph, then process edges in a reverse direction.
        Currently not implemented; passing True raises NotImplementedError.
    depth_limit : Int or None, optional (default=None)
        Limit the depth of the search
    sort_neighbors : None or Function, optional (default=None)
        Currently not implemented; ignored.
    Returns
    -------
    Same as :func:`bfs`: a cudf.DataFrame with 'vertex', 'distance' and
    'predecessor' columns for a cugraph.Graph input, a pandas.DataFrame for a
    networkx.Graph input, or a (distance, predecessor) ndarray tuple for a
    CuPy/SciPy matrix input.
    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> df = cugraph.bfs_edges(G, 0)
    """
    if reverse is True:
        raise NotImplementedError(
            "reverse processing of graph is currently not supported"
        )
    # Delegate to the regular forward BFS.
    return bfs(G, source, depth_limit)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/traversal/ms_bfs.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
import warnings
def _get_feasibility(G, sources, components=None, depth_limit=None):
"""
Evaluate the feasibility for breadth first traversal from multiple sources
in a graph.
Parameters
----------
G : cugraph.Graph
The adjacency list will be computed if not already present.
sources : cudf.Series
Subset of vertices from which the traversals start. A BFS is run for
each source in the Series.
The size of the series should be at least one and cannot exceed
the size of the graph.
depth_limit : Integer, optional, default=None
Limit the depth of the search. Terminates if no more vertices are
reachable within the distance of depth_limit
components : cudf.DataFrame, optional, default=None
GPU Dataframe containing the component information.
Passing this information may impact the return type.
When no component information is passed BFS uses one component
behavior settings.
components['vertex'] : cudf.Series
vertex IDs
components['color'] : cudf.Series
component IDs/color for vertices.
Returns
-------
mem_footprint : integer
Estimated memory foot print size in Bytes
"""
# Fixme not implemented in RMM yet
# using 96GB upper bound for now
# mem = get_device_memory_info()
mem = 9.6e10
n_sources = sources.size
V = G.number_of_vertices()
E = G.number_of_edges()
mean_component_sz = V
n_components = 1
# Retreive types
size_of_v = 4
size_of_e = 4
size_of_w = 0
if G.adjlist.weights is not None:
if G.adjlist.weights.dtype is np.float64:
size_of_w = 8
else:
size_of_w = 4
if G.adjlist.offsets.dtype is np.float64:
size_of_v = 8
if G.adjlist.indices.dtype is np.float64:
size_of_e = 8
# Graph size
G_sz = E * size_of_e + E * size_of_w + V * size_of_v
# The impact of depth limit depends on the sparsity
# pattern and diameter. We cannot leverage it without
# traversing the full dataset a the moment.
# dense output
output_sz = n_sources * 2 * V * size_of_v
# sparse output
if components is not None:
tmp = components["color"].value_counts()
n_components = tmp.size
if n_sources / n_components > 100:
warnings.warn("High number of seeds per component result in large output.")
mean_component_sz = tmp.mean()
output_sz = mean_component_sz * n_sources * 2 * size_of_e
# counting 10% for context, handle and temporary allocations
mem_footprint = (G_sz + output_sz) * 1.1
if mem_footprint > mem:
warnings.warn(f"Cannot execute in-memory :{mem_footprint} Bytes")
return mem_footprint
def concurrent_bfs(Graphs, sources, depth_limit=None, offload=False):
    """
    Find the breadth first traversals of multiple graphs with multiple sources
    in each graph.
    Parameters
    ----------
    Graphs : list of cugraph.Graph
        The adjacency lists will be computed if not already present.
    sources : list of cudf.Series
        For each graph, subset of vertices from which the traversals start.
        A BFS is run in Graphs[i] for each source in the Series at sources[i].
        The size of this list must match the size of the graph list.
        The size of each Series (ie. the number of sources per graph)
        is flexible, but cannot exceed the size of the corresponding graph.
    depth_limit : Integer, optional, default=None
        Limit the depth of the search. Terminates if no more vertices are
        reachable within the distance of depth_limit
    offload : boolean, optional, default=False
        Indicates if output should be written to the disk.
        When not provided, the algorithms decides if offloading is needed
        based on the input parameters.
    Returns
    -------
    Return type is decided based on the input parameters (size of
    sources, size of the graph, number of graphs and offload setting)
    If G is a cugraph.Graph and output fits in memory:
        BFS_edge_lists : cudf.DataFrame
            GPU data frame containing all BFS edges
        source_offsets: cudf.Series
            Series containing the starting offset in the returned edge list
            for each source.
    If offload is True, or if the output does not fit in memory :
        Writes csv files containing BFS output to the disk.
    Raises
    ------
    NotImplementedError
        Always, for valid arguments: the algorithm is not implemented yet.
    """
    # Validate the arguments *before* raising NotImplementedError so callers
    # get a meaningful TypeError/ValueError for bad input. (Previously an
    # unconditional raise at the top of the function made every check below
    # unreachable dead code.)
    if not isinstance(Graphs, list):
        raise TypeError("Graphs should be a list of cugraph.Graph")
    if not isinstance(sources, list):
        raise TypeError("sources should be a list of cudf.Series")
    if len(Graphs) != len(sources):
        raise ValueError(
            "The size of the sources list must match\
            the size of the graph list."
        )
    if offload is True:
        raise NotImplementedError(
            "Offloading is coming soon! Please up vote the github issue 1461\
            to help us prioritize"
        )
    raise NotImplementedError(
        "concurrent_bfs is coming soon! Please up vote the github issue 1465\
        to help us prioritize"
    )
    # Planned implementation:
    # Consolidate graphs in a single graph and record components
    # Renumber and concatenate sources in a single df
    # Call multi_source_bfs
    # multi_source_bfs(
    #     G,
    #     sources,
    #     components=components,
    #     depth_limit=depth_limit,
    #     offload=offload,
    # )
def multi_source_bfs(G, sources, components=None, depth_limit=None, offload=False):
    """
    Find the breadth first traversal from multiple sources in a graph.

    NOTE: this API is not implemented yet; calling it always raises
    NotImplementedError (tracked by github issue 1465). Everything after the
    ``raise`` below is scaffolding for the future implementation and is
    currently unreachable.

    Parameters
    ----------
    G : cugraph.Graph
        The adjacency list will be computed if not already present.
    sources : cudf.Series
        Subset of vertices from which the traversals start. A BFS is run for
        each source in the Series.
        The size of the series should be at least one and cannot exceed the
        size of the graph.
    depth_limit : Integer, optional, default=None
        Limit the depth of the search. Terminates if no more vertices are
        reachable within the distance of depth_limit
    components : cudf.DataFrame, optional, default=None
        GPU Dataframe containing the component information.
        Passing this information may impact the return type.
        When no component information is passed BFS uses one component
        behavior settings.

        components['vertex'] : cudf.Series
            vertex IDs
        components['color'] : cudf.Series
            component IDs/color for vertices.
    offload : boolean, optional, default=False
        Indicates if output should be written to the disk.
        When not provided, the algorithms decides if offloading is needed
        based on the input parameters.

    Returns
    -------
    Return value type is decided based on the input parameters (size of
    sources, size of the graph, number of components and offload setting)

    If G is a cugraph.Graph, returns :
        cudf.DataFrame
            df['vertex'] vertex IDs
            df['distance_<source>'] path distance for each vertex from the
            starting vertex. One column per source.
            df['predecessor_<source>'] for each i'th position in the column,
            the vertex ID immediately preceding the vertex at position i in
            the 'vertex' column. One column per source.

    If G is a cugraph.Graph and component information is present returns :
        BFS_edge_lists : cudf.DataFrame
            GPU data frame containing all BFS edges
        source_offsets: cudf.Series
            Series containing the starting offset in the returned edge list
            for each source.

    If offload is True, or if the output does not fit in memory :
        Writes csv files containing BFS output to the disk.
    """
    # FIXME: remove this raise (and enable the draft code below) once the
    # implementation lands.
    # NOTE(review): the message mentions concurrent_bfs but is raised from
    # multi_source_bfs; both features are tracked by the same issue.
    raise NotImplementedError(
        "concurrent_bfs is coming soon! Please up vote the github issue 1465\
        to help us prioritize"
    )
    # ---------------------------------------------------------------------
    # Unreachable draft implementation, kept as scaffolding:
    # if components is not None:
    #     null_check(components["vertex"])
    #     null_check(components["colors"])
    #
    # if depth_limit is not None:
    #     raise NotImplementedError(
    #         "depth limit implementation of BFS is not currently supported"
    #     )

    # if offload is True:
    #     raise NotImplementedError(
    #         "Offloading is coming soon! Please up vote the github issue 1461
    #         to help us prioritize"
    #     )
    # Accept a plain python list of sources for convenience.
    if isinstance(sources, list):
        sources = cudf.Series(sources)
    # Map external vertex IDs to the graph's internal (renumbered) IDs.
    # NOTE(review): 'sources' may already be a cudf.Series here, so the
    # re-wrapping below looks redundant — confirm before enabling this path.
    if G.renumbered is True:
        sources = G.lookup_internal_vertex_id(cudf.Series(sources))
    if not G.adjlist:
        G.view_adj_list()
    # Memory footprint check
    footprint = _get_feasibility(
        G, sources, components=components, depth_limit=depth_limit
    )
    print(footprint)
    # Call multi_source_bfs
    # FIXME remove when implemented
    # raise NotImplementedError("Commming soon")
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/traversal/__init__.py | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.traversal.bfs import bfs
from cugraph.traversal.bfs import bfs_edges
from cugraph.traversal.sssp import (
sssp,
shortest_path,
filter_unreachable,
shortest_path_length,
)
from cugraph.traversal.ms_bfs import concurrent_bfs, multi_source_bfs
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/traversal/sssp.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
from cugraph.structure import Graph, MultiGraph
from cugraph.utilities import (
ensure_cugraph_obj,
is_matrix_type,
is_cp_matrix_type,
is_nx_graph_type,
cupy_package as cp,
)
from pylibcugraph import sssp as pylibcugraph_sssp, ResourceHandle
def _ensure_args(
    G, source, method, directed, return_predecessors, unweighted, overwrite, indices
):
    """
    Validate the combination of arguments passed to sssp()/shortest_path()
    and return (source, directed, return_predecessors) with proper defaults
    applied, raising TypeError or ValueError if incorrectly specified.

    The SciPy-compatibility options (directed, return_predecessors,
    unweighted, overwrite) are only accepted for matrix (CuPy/SciPy) inputs;
    'indices' is an alias for 'source' and the two are mutually exclusive.
    """
    # checks common to all input types
    if (method is not None) and (method != "auto"):
        raise ValueError("only 'auto' is currently accepted for method")
    # FIX: use isinstance() instead of comparing type() objects (PEP 8 idiom)
    if (indices is not None) and isinstance(indices, list):
        raise ValueError("indices currently cannot be a list-like type")
    if (indices is not None) and (source is not None):
        raise TypeError("cannot specify both 'source' and 'indices'")
    if (indices is None) and (source is None):
        raise TypeError("must specify 'source' or 'indices', but not both")

    G_type = type(G)
    # Check for Graph-type inputs
    if G_type is Graph or is_nx_graph_type(G_type):
        # FIXME: Improve Graph-type checking
        exc_value = "'%s' cannot be specified for a Graph-type input"
        if directed is not None:
            raise TypeError(exc_value % "directed")
        if return_predecessors is not None:
            raise TypeError(exc_value % "return_predecessors")
        if unweighted is not None:
            raise TypeError(exc_value % "unweighted")
        if overwrite is not None:
            raise TypeError(exc_value % "overwrite")

        # Ensure source vertex is valid
        invalid_vertex_err = ValueError(
            f"Vertex {source} is not valid for the NetworkX Graph"
        )
        if is_nx_graph_type(G_type) and source not in G:
            raise invalid_vertex_err
        elif indices is None and not G.has_node(source):
            raise invalid_vertex_err
        directed = False

    # Check for non-Graph-type (matrix) inputs
    else:
        # FIX: isinstance() over type() comparisons; behavior unchanged since
        # bool cannot be subclassed.
        if (directed is not None) and not isinstance(directed, bool):
            raise ValueError("'directed' must be a bool")
        if (return_predecessors is not None) and not isinstance(
            return_predecessors, bool
        ):
            raise ValueError("'return_predecessors' must be a bool")
        if (unweighted is not None) and (unweighted is not True):
            raise ValueError("'unweighted' currently must be True if specified")
        if (overwrite is not None) and (overwrite is not False):
            raise ValueError("'overwrite' currently must be False if specified")

    # 'indices' is the SciPy-style spelling of 'source'
    source = source if source is not None else indices
    if return_predecessors is None:
        return_predecessors = True

    return (source, directed, return_predecessors)
def _convert_df_to_output_type(df, input_type, return_predecessors):
    """
    Convert the cudf.DataFrame df to the return type appropriate for the
    graph algos in this module, chosen from input_type.

    return_predecessors only affects the return value for CuPy/SciPy
    matrix input types.
    """
    # cugraph Graph inputs: hand back the GPU dataframe unchanged.
    if input_type in [Graph, MultiGraph]:
        return df

    # NetworkX inputs: callers expect a pandas DataFrame.
    if is_nx_graph_type(input_type):
        return df.to_pandas()

    if not is_matrix_type(input_type):
        raise TypeError(f"input type {input_type} is not a supported type.")

    # CuPy/SciPy matrix input: the return value is one or two ndarrays
    # (distance, and optionally predecessor), ordered by vertex id.
    sorted_df = df.sort_values("vertex")
    if is_cp_matrix_type(input_type):
        # CuPy input -> CuPy ndarrays via zero-copy dlpack handoff.
        def to_array(col):
            return cp.from_dlpack(col.to_dlpack())

    else:
        # SciPy input -> host NumPy ndarrays.
        def to_array(col):
            return col.to_numpy()

    if return_predecessors:
        return (to_array(sorted_df["distance"]), to_array(sorted_df["predecessor"]))
    return to_array(sorted_df["distance"])
def sssp(
    G,
    source=None,
    method=None,
    directed=None,
    return_predecessors=None,
    unweighted=None,
    overwrite=None,
    indices=None,
    cutoff=None,
    edge_attr="weight",
):
    """
    Compute the distance and predecessors for shortest paths from the specified
    source to all the vertices in the graph. The distances column will store
    the distance from the source to each vertex. The predecessors column will
    store each vertex's predecessor in the shortest path. Vertices that are
    unreachable will have a distance of infinity denoted by the maximum value
    of the data type and the predecessor set as -1. The source vertex's
    predecessor is also set to -1. Graphs with negative weight cycles are not
    supported. Unweighted graphs are also unsupported.

    For finding shortest paths on an unweighted graph, use BFS instead.

    Parameters
    ----------
    G : cugraph.Graph, networkx.Graph, CuPy or SciPy sparse matrix Graph or
        matrix object, which should contain the connectivity information. Edge
        weights, if present, should be single or double precision floating
        point values.
        The current implementation only supports weighted graphs.
    source : int
        Index of the source vertex.
    method : str, optional (default=None)
        SciPy-API compatibility parameter; only "auto" (or None) is
        accepted, any other value raises ValueError.
    directed : bool, optional (default=None)
        May only be specified for CuPy/SciPy matrix inputs; specifying it
        for a Graph-type input raises TypeError.
    return_predecessors : bool, optional (default=None)
        Matrix inputs only; controls whether the matrix-style return
        includes the predecessor array. Defaults to True when unspecified.
    unweighted : bool, optional (default=None)
        Matrix inputs only; currently must be True if specified.
    overwrite : bool, optional (default=None)
        Matrix inputs only; currently must be False if specified.
    indices : int, optional (default=None)
        SciPy-API alias for 'source'; cannot be specified together with
        'source'.
    cutoff : double, optional (default=None)
        Maximum edge weight sum considered by the algorithm
    edge_attr : str, optional (default='weight')
        The name of the edge attribute that represents the weight of an edge.
        This currently applies only when G is a NetworkX Graph.
        Default value is 'weight', which follows NetworkX convention.

    Returns
    -------
    Return value type is based on the input type. If G is a cugraph.Graph,
    returns:

       cudf.DataFrame
          df['vertex']
              vertex id
          df['distance']
              gives the path distance from the starting vertex
          df['predecessor']
              the vertex it was reached from

    If G is a networkx.Graph, returns:

       pandas.DataFrame with contents equivalent to the cudf.DataFrame
       described above.

    If G is a CuPy or SciPy matrix, returns:
       a 2-tuple of CuPy ndarrays (if CuPy matrix input) or Numpy ndarrays (if
       SciPy matrix input) representing:

       distance: cupy or numpy ndarray
          ndarray of shortest distances between source and vertex.

       predecessor: cupy or numpy ndarray
          ndarray of predecessors of a vertex on the path from source, which
          can be used to reconstruct the shortest paths.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> distances = cugraph.sssp(G, 0)
    >>> distances
            distance  vertex  predecessor
    ...     ...       ...     ...
    ...     ...       ...     ...
    ...     ...       ...     ...

    """
    # Validate args and resolve the SciPy-compat defaults/aliases.
    (source, directed, return_predecessors) = _ensure_args(
        G, source, method, directed, return_predecessors, unweighted, overwrite, indices
    )

    # Normalize any supported input (nx graph, CuPy/SciPy matrix) to a
    # cugraph.Graph; input_type remembers the original type for the return
    # conversion at the end.
    (G, input_type) = ensure_cugraph_obj(
        G, nx_weight_attr=edge_attr, matrix_graph_type=Graph(directed=directed)
    )

    if not G.is_weighted():
        err_msg = (
            "'SSSP' requires the input graph to be weighted."
            "'BFS' should be used instead of 'SSSP' for unweighted graphs."
        )
        raise RuntimeError(err_msg)

    if not G.has_node(source):
        raise ValueError("Graph does not contain source vertex")

    # Translate the user-facing source id to the graph's internal id.
    if G.renumbered:
        if isinstance(source, cudf.DataFrame):
            # multi-column vertex: look up by all columns, take the scalar
            source = G.lookup_internal_vertex_id(source, source.columns).iloc[0]
        else:
            source = G.lookup_internal_vertex_id(cudf.Series([source]))[0]

    # No cutoff means "no limit" for the C++ implementation.
    if cutoff is None:
        cutoff = np.inf

    # compute_predecessors MUST be true in the current version of sssp
    vertices, distances, predecessors = pylibcugraph_sssp(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        source=source,
        cutoff=cutoff,
        compute_predecessors=True,
        do_expensive_check=False,
    )

    df = cudf.DataFrame(
        {
            "distance": cudf.Series(distances),
            "vertex": cudf.Series(vertices),
            "predecessor": cudf.Series(predecessors),
        }
    )

    # Map internal ids back to the caller's ids; unrenumbering can introduce
    # nulls (e.g. for the -1 "no predecessor" sentinel), restore them to -1.
    if G.renumbered:
        df = G.unrenumber(df, "vertex")
        df = G.unrenumber(df, "predecessor")
        df.fillna(-1, inplace=True)

    return _convert_df_to_output_type(df, input_type, return_predecessors)
def filter_unreachable(df):
    """
    Remove unreachable vertices from the result of SSSP or BFS.

    Unreachable vertices are marked with a distance equal to the maximum
    representable value of the distance column's dtype; those rows are
    dropped.

    Parameters
    ----------
    df : cudf.DataFrame
        cudf.DataFrame that is the output of SSSP or BFS

    Returns
    -------
    df : filtered cudf.DataFrame with only reachable vertices
        df['vertex'][i] gives the vertex id of the i'th vertex.
        df['distance'][i] gives the path distance for the i'th vertex from the
        starting vertex.
        df['predecessor'][i] gives the vertex that was reached before the i'th
        vertex in the traversal.
    """
    if "distance" not in df:
        raise KeyError("No distance column found in input data frame")

    # Determine the "unreachable" sentinel from the column's dtype, then
    # filter with a single code path.
    dist_dtype = df["distance"].dtype
    if np.issubdtype(dist_dtype, np.integer):
        sentinel = np.iinfo(dist_dtype).max
    elif np.issubdtype(dist_dtype, np.inexact):
        sentinel = np.finfo(dist_dtype).max
    else:
        raise TypeError("distance type unsupported")
    return df[df["distance"] != sentinel]
def shortest_path(
    G,
    source=None,
    method=None,
    directed=None,
    return_predecessors=None,
    unweighted=None,
    overwrite=None,
    indices=None,
):
    """
    NetworkX-compatible alias of sssp(); see sssp() for full documentation.
    """
    # Forward every argument unchanged; sssp() performs all validation.
    return sssp(
        G,
        source=source,
        method=method,
        directed=directed,
        return_predecessors=return_predecessors,
        unweighted=unweighted,
        overwrite=overwrite,
        indices=indices,
    )
def shortest_path_length(G, source, target=None):
    """
    Compute the distance from a source vertex to one or all vertexes in graph.
    Uses Single Source Shortest Path (SSSP).

    Parameters
    ----------
    G : cuGraph.Graph, NetworkX.Graph, or CuPy sparse COO matrix
        cuGraph graph descriptor with connectivity information. Edge weights,
        if present, should be single or double precision floating point values.
    source : Dependant on graph type. Index of the source vertex.
        If graph is an instance of cuGraph.Graph or CuPy sparse COO matrix:
            int
        If graph is an instance of a NetworkX.Graph:
            str
    target: Dependant on graph type. Vertex to find distance to.
        If graph is an instance of cuGraph.Graph or CuPy sparse COO matrix:
            int
        If graph is an instance of a NetworkX.Graph:
            str

    Returns
    -------
    Return value type is based on the input type.

    If target is None, returns:
        cudf.DataFrame
            df['vertex']
                vertex id
            df['distance']
                gives the path distance from the starting vertex

    If target is not None, returns:
        Distance from source to target vertex.
    """
    # verify target is in graph before traversing
    if target is not None:
        if not hasattr(G, "has_node"):
            # G is a cupy coo_matrix. Extract maximum possible vertex value
            as_matrix = G.toarray()
            if target < 0 or target >= max(as_matrix.shape[0], as_matrix.shape[1]):
                raise ValueError("Graph does not contain target vertex")
        elif not G.has_node(target):
            # G is an instance of cugraph or networkx graph
            raise ValueError("Graph does not contain target vertex")

    df = sssp(G, source)

    if isinstance(df, tuple):
        # cupy path, df is a (distance, predecessor) tuple of ndarrays
        # ordered by vertex id 0..n-1 (see the range() below).
        if target is not None:
            # FIX: was `if target:` + `df[0][target - 1]`. `if target:` treated
            # vertex 0 as "no target" (0 is falsy), and distances are indexed
            # directly by vertex id, so the -1 offset read the wrong element.
            return df[0][target]
        results = cudf.DataFrame()
        results["vertex"] = range(df[0].shape[0])
        results["distance"] = df[0]
        return results
    else:
        # cugraph and networkx path
        if target is not None:
            # FIX: was `if target:`, which returned the full DataFrame when
            # target was vertex 0.
            target_distance = df.loc[df["vertex"] == target]
            return target_distance.iloc[0]["distance"]
        results = cudf.DataFrame()
        results["vertex"] = df["vertex"]
        results["distance"] = df["distance"]
        return results
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/overlap.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_edge_score_to_dictionary,
renumber_vertex_pair,
)
import cudf
import warnings
from typing import Union, Iterable
from pylibcugraph import (
overlap_coefficients as pylibcugraph_overlap_coefficients,
)
from pylibcugraph import ResourceHandle
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, vertex_pair):
    """
    Cast both columns of 'vertex_pair' to the graph's vertex dtype, emitting
    a UserWarning when a cast is required; return 'vertex_pair' unchanged
    when the dtypes already match.
    """
    expected_dtype = input_graph.edgelist.edgelist_df.dtypes.iloc[0]
    pair_dtypes = vertex_pair.dtypes
    if pair_dtypes.iloc[0] != expected_dtype or pair_dtypes.iloc[1] != expected_dtype:
        warnings.warn(
            "Overlap requires 'vertex_pair' to match the graph's 'vertex' type. "
            f"input graph's vertex type is: {expected_dtype} and got "
            f"'vertex_pair' of type: {pair_dtypes}.",
            UserWarning,
        )
        vertex_pair = vertex_pair.astype(expected_dtype)
    return vertex_pair
def overlap_coefficient(
    G: Union[Graph, "networkx.Graph"],
    ebunch: Union[cudf.DataFrame, Iterable[Union[int, str, float]]] = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    Compute the overlap coefficient for pairs of vertices.

    Parameters
    ----------
    G : cugraph.Graph or NetworkX.Graph
        cuGraph or NetworkX Graph instance containing the connectivity
        information as an edge list. The graph should be undirected where an
        undirected edge is represented by a directed edge in both directions.
        Only undirected, non-multi edge graphs are supported.

    ebunch : cudf.DataFrame or iterable of node pairs, optional (default=None)
        Pairs of vertices (a two-column GPU dataframe, or an iterable of
        (u, v) 2-tuples) to score. When omitted, the coefficient is computed
        for all adjacent vertex pairs in the graph.

    do_expensive_check : bool, optional (default=False)
        Deprecated; sequential vertex IDs are no longer required, so this
        check is redundant and the option will be removed in a future
        version.

    Returns
    -------
    df : cudf.DataFrame (or dict for NetworkX inputs)
        'first' and 'second' hold the vertex IDs of each pair and
        'overlap_coeff' the computed coefficient. For NetworkX inputs the
        result is converted to an edge-score dictionary.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import overlap_coefficient
    >>> G = karate.get_graph(download=True, ignore_weights=True)
    >>> df = overlap_coefficient(G)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    G, isNx = ensure_cugraph_obj_for_nx(G)

    # FIXME: What is the logic behind this since the docstrings mention that 'G' and
    # 'ebunch'(if not None) are respectively of type cugraph.Graph and cudf.DataFrame?
    # NetworkX callers may pass an iterable ebunch; materialize it as a GPU
    # dataframe of vertex pairs. Otherwise score all adjacent pairs.
    if isNx is True and ebunch is not None:
        vertex_pair = cudf.DataFrame(ebunch)
    else:
        vertex_pair = None

    scores = overlap(G, vertex_pair)

    # Convert back to the NetworkX-style dict-of-edge-scores when needed.
    if isNx is True:
        scores = df_edge_score_to_dictionary(
            scores, k="overlap_coeff", src="first", dst="second"
        )
    return scores
def overlap(
    input_graph: Graph,
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
    use_weight: bool = False,
):
    """
    Compute the Overlap Coefficient between each pair of vertices connected by
    an edge, or between arbitrary pairs of vertices specified by the user.
    Overlap Coefficient is defined between two sets as the ratio of the volume
    of their intersection divided by the smaller of their two volumes. In the
    context of graphs, the neighborhood of a vertex is seen as a set. The
    Overlap Coefficient weight of each edge represents the strength of
    connection between vertices based on the relative similarity of their
    neighbors.

    cugraph.overlap, in the absence of a specified vertex pair list, will
    compute the two_hop_neighbors of the entire graph to construct a vertex pair
    list and will return the overlap coefficient for those vertex pairs. This is
    not advisable as the vertex_pairs can grow exponentially with respect to the
    size of the datasets.

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance, should contain the connectivity information
        as an edge list. The adjacency list will be computed if not already
        present.
        This implementation only supports undirected, non-multi edge Graph.

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the overlap coefficient is computed for the
        given vertex pairs, else, it is computed for all vertex pairs.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    use_weight : bool, optional (default=False)
        Flag to indicate whether to compute weighted overlap (if use_weight==True)
        or un-weighted overlap (if use_weight==False).
        'input_graph' must be weighted if 'use_weight=True'.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Overlap coefficients. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair (will be identical to first if specified).
        df['second'] : cudf.Series
            The second vertex ID of each pair (will be identical to second if
            specified).
        df['overlap_coeff'] : cudf.Series
            The computed overlap coefficient between the first and the second
            vertex ID.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import overlap
    >>> input_graph = karate.get_graph(download=True, ignore_weights=True)
    >>> df = overlap(input_graph)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    if input_graph.is_directed():
        raise ValueError("Input must be an undirected Graph.")

    if vertex_pair is None:
        # Call two_hop neighbor of the entire graph
        vertex_pair = input_graph.get_two_hop_neighbors()
    elif not isinstance(vertex_pair, cudf.DataFrame):
        # FIX: validate the type BEFORE touching vertex_pair.columns below, so
        # an invalid argument raises the intended ValueError rather than an
        # AttributeError from the len(...columns) access.
        raise ValueError("vertex_pair must be a cudf dataframe")

    # Remember the column count to detect single-column vertices for renaming
    # after the (possibly multi-column) un-renumbering below.
    v_p_num_col = len(vertex_pair.columns)

    vertex_pair = renumber_vertex_pair(input_graph, vertex_pair)
    vertex_pair = ensure_valid_dtype(input_graph, vertex_pair)
    src_col_name = vertex_pair.columns[0]
    dst_col_name = vertex_pair.columns[1]
    first = vertex_pair[src_col_name]
    second = vertex_pair[dst_col_name]

    first, second, overlap_coeff = pylibcugraph_overlap_coefficients(
        resource_handle=ResourceHandle(),
        graph=input_graph._plc_graph,
        first=first,
        second=second,
        use_weight=use_weight,
        do_expensive_check=False,
    )

    if input_graph.renumbered:
        vertex_pair = input_graph.unrenumber(
            vertex_pair, src_col_name, preserve_order=True
        )
        vertex_pair = input_graph.unrenumber(
            vertex_pair, dst_col_name, preserve_order=True
        )

    if v_p_num_col == 2:
        # single column vertex
        vertex_pair = vertex_pair.rename(
            columns={src_col_name: "first", dst_col_name: "second"}
        )

    df = vertex_pair
    df["overlap_coeff"] = cudf.Series(overlap_coeff)

    return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/wjaccard.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.link_prediction import jaccard
import cudf
import warnings
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, vertex_pair):
    """
    Return 'vertex_pair' with both columns cast to the graph's vertex dtype.

    A UserWarning is emitted when a cast is needed; the input is returned
    unchanged when the dtypes already match.
    """
    graph_vertex_dtype = input_graph.edgelist.edgelist_df.dtypes.iloc[0]
    pair_dtypes = vertex_pair.dtypes
    dtypes_match = (
        pair_dtypes.iloc[0] == graph_vertex_dtype
        and pair_dtypes.iloc[1] == graph_vertex_dtype
    )
    if not dtypes_match:
        warnings.warn(
            "Jaccard requires 'vertex_pair' to match the graph's 'vertex' type. "
            f"input graph's vertex type is: {graph_vertex_dtype} and got "
            f"'vertex_pair' of type: {pair_dtypes}.",
            UserWarning,
        )
        vertex_pair = vertex_pair.astype(graph_vertex_dtype)
    return vertex_pair
def jaccard_w(
    input_graph: Graph,
    weights: cudf.DataFrame = None,  # deprecated
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    Compute the weighted Jaccard similarity between each pair of vertices
    connected by an edge, or between arbitrary pairs of vertices specified by
    the user. Jaccard similarity is defined between two sets as the ratio of
    the volume of their intersection divided by the volume of their union. In
    the context of graphs, the neighborhood of a vertex is seen as a set. The
    Jaccard similarity weight of each edge represents the strength of
    connection between vertices based on the relative similarity of their
    neighbors.

    .. deprecated::
        jaccard_w is deprecated; use
        ``jaccard(input_graph, vertex_pair, use_weight=True)`` instead.

    NOTE: This algorithm doesn't currently support datasets with vertices that
    are not (re)numbered vertices from 0 to V-1 where V is the total number of
    vertices as this creates isolated vertices.

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance , should contain the connectivity information
        as an edge list (edge weights are not used for this algorithm). The
        adjacency list will be computed if not already present.

    weights : cudf.DataFrame
        Deprecated and ignored: this parameter is accepted for backward
        compatibility only and is never used by this function (the edge
        weights of 'input_graph' are used instead via use_weight=True).

        Specifies the weights to be used for each vertex.
        Vertex should be represented by multiple columns for multi-column
        vertices.

        weights['vertex'] : cudf.Series
            Contains the vertex identifiers
        weights['weight'] : cudf.Series
            Contains the weights of vertices

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the jaccard coefficient is computed for the
        given vertex pairs, else, it is computed for all vertex pairs.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Jaccard weights. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair.
        df['second'] : cudf.Series
            The second vertex ID of each pair.
        df['jaccard_coeff'] : cudf.Series
            The computed weighted Jaccard coefficient between the first and the
            second vertex ID.

    Examples
    --------
    >>> import random
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> # Create a dataframe containing the vertices with their
    >>> # corresponding weight
    >>> weights = cudf.DataFrame()
    >>> # Sample 10 random vertices from the graph and drop duplicates if
    >>> # there are any to avoid duplicates vertices with different weight
    >>> # value in the 'weights' dataframe
    >>> weights['vertex'] = G.nodes().sample(n=10).drop_duplicates()
    >>> # Reset the indices and drop the index column
    >>> weights.reset_index(inplace=True, drop=True)
    >>> # Create a weight column with random weights
    >>> weights['weight'] = [random.random() for w in range(
    ...                     len(weights['vertex']))]
    >>> df = cugraph.jaccard_w(G, weights)

    """
    warning_msg = (
        "jaccard_w is deprecated. To compute weighted jaccard, please use "
        "jaccard(input_graph, vertex_pair=False, use_weight=True)"
    )
    warnings.warn(warning_msg, FutureWarning)

    # NOTE(review): 'weights' is intentionally not forwarded — the weighted
    # computation uses the graph's own edge weights via use_weight=True.
    # do_expensive_check is forwarded positionally as jaccard's third arg.
    return jaccard(input_graph, vertex_pair, do_expensive_check, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/wsorensen.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.link_prediction import sorensen
import cudf
import warnings
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
def sorensen_w(
    input_graph: Graph,
    weights: cudf.DataFrame = None,  # deprecated, ignored
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    Compute the weighted Sorensen similarity between each pair of vertices
    connected by an edge, or between arbitrary pairs of vertices specified by
    the user. Sorensen coefficient is defined between two sets as the ratio of
    twice the volume of their intersection divided by the volume of each set.

    Deprecated: use ``sorensen(input_graph, vertex_pair, use_weight=True)``
    instead.

    NOTE: This algorithm doesn't currently support datasets with vertices that
    are not (re)numbered vertices from 0 to V-1 where V is the total number of
    vertices as this creates isolated vertices.

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance, should contain the connectivity information
        as an edge list (edge weights are not used for this algorithm). The
        adjacency list will be computed if not already present.

    weights : cudf.DataFrame
        Deprecated and ignored: this implementation forwards to
        :func:`sorensen` with ``use_weight=True``, which reads edge weights
        from ``input_graph`` instead of from this dataframe.

        weights['vertex'] : cudf.Series
            Contains the vertex identifiers
        weights['weight'] : cudf.Series
            Contains the weights of vertices

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the sorensen coefficient is computed for the
        given vertex pairs, else, it is computed for all vertex pairs.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Sorensen weights. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair.
        df['second'] : cudf.Series
            The second vertex ID of each pair.
        df['sorensen_coeff'] : cudf.Series
            The computed weighted Sorensen coefficient between the first and
            the second vertex ID.

    Examples
    --------
    >>> import random
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> # Create a dataframe containing the vertices with their
    >>> # corresponding weight
    >>> weights = cudf.DataFrame()
    >>> # Sample 10 random vertices from the graph and drop duplicates if
    >>> # there are any to avoid duplicates vertices with different weight
    >>> # value in the 'weights' dataframe
    >>> weights['vertex'] = G.nodes().sample(n=10).drop_duplicates()
    >>> # Reset the indices and drop the index column
    >>> weights.reset_index(inplace=True, drop=True)
    >>> # Create a weight column with random weights
    >>> weights['weight'] = [random.random() for w in range(
    ...     len(weights['vertex']))]
    >>> df = cugraph.sorensen_w(G, weights)

    """
    # BUG FIX: the old message recommended "vertex_pair=False", which is not a
    # valid value for that parameter (it expects a cudf.DataFrame or None).
    warning_msg = (
        "sorensen_w is deprecated. To compute weighted sorensen, please use "
        "sorensen(input_graph, vertex_pair, use_weight=True)"
    )
    warnings.warn(warning_msg, FutureWarning)
    # Forward do_expensive_check positionally for parity with overlap_w and
    # jaccard_w; sorensen() itself emits the deprecation warning if it is set.
    return sorensen(input_graph, vertex_pair, do_expensive_check, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/jaccard.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_edge_score_to_dictionary,
renumber_vertex_pair,
)
import cudf
import warnings
from typing import Union, Iterable
from pylibcugraph import (
jaccard_coefficients as pylibcugraph_jaccard_coefficients,
)
from pylibcugraph import ResourceHandle
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, vertex_pair):
    """
    Return ``vertex_pair`` with both columns cast to the graph's vertex dtype.

    If either column's dtype differs from the dtype of the graph's first
    edgelist column, a UserWarning is emitted and the whole dataframe is cast;
    otherwise the input is returned unchanged.
    """
    expected_dtype = input_graph.edgelist.edgelist_df.dtypes[0]
    pair_dtypes = vertex_pair.dtypes
    mismatch = pair_dtypes[0] != expected_dtype or pair_dtypes[1] != expected_dtype
    if not mismatch:
        return vertex_pair
    warnings.warn(
        "Jaccard requires 'vertex_pair' to match the graph's 'vertex' type. "
        f"input graph's vertex type is: {expected_dtype} and got "
        f"'vertex_pair' of type: {pair_dtypes}.",
        UserWarning,
    )
    return vertex_pair.astype(expected_dtype)
def jaccard(
    input_graph: Graph,
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
    use_weight: bool = False,
):
    """
    Compute the Jaccard similarity between each pair of vertices connected by
    an edge, or between arbitrary pairs of vertices specified by the user.
    Jaccard similarity is defined between two sets as the ratio of the volume
    of their intersection divided by the volume of their union. In the context
    of graphs, the neighborhood of a vertex is seen as a set. The Jaccard
    similarity weight of each edge represents the strength of connection
    between vertices based on the relative similarity of their neighbors. If
    first is specified but second is not, or vice versa, an exception will be
    thrown.

    NOTE: If the vertex_pair parameter is not specified then the behavior
    of cugraph.jaccard is different from the behavior of
    networkx.jaccard_coefficient.

    cugraph.jaccard, in the absence of a specified vertex pair list, will
    compute the two_hop_neighbors of the entire graph to construct a vertex pair
    list and will return the jaccard coefficient for those vertex pairs. This is
    not advisable as the vertex_pairs can grow exponentially with respect to the
    size of the datasets

    networkx.jaccard_coefficient, in the absence of a specified vertex
    pair list, will return an upper triangular dense matrix, excluding
    the diagonal as well as vertex pairs that are directly connected
    by an edge in the graph, of jaccard coefficients. Technically, networkx
    returns a lazy iterator across this upper triangular matrix where
    the actual jaccard coefficient is computed when the iterator is
    dereferenced. Computing a dense matrix of results is not feasible
    if the number of vertices in the graph is large (100,000 vertices
    would result in 4.9 billion values in that iterator).

    If your graph is small enough (or you have enough memory and patience)
    you can get the interesting (non-zero) values that are part of the networkx
    solution by doing the following:

    >>> from cugraph.datasets import karate
    >>> input_graph = karate.get_graph(download=True, ignore_weights=True)
    >>> pairs = input_graph.get_two_hop_neighbors()
    >>> df = cugraph.jaccard(input_graph, pairs)

    But please remember that cugraph will fill the dataframe with the entire
    solution you request, so you'll need enough memory to store the 2-hop
    neighborhood dataframe.

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance, should contain the connectivity information
        as an edge list. The graph should be undirected where an undirected
        edge is represented by a directed edge in both direction.The adjacency
        list will be computed if not already present.

        This implementation only supports undirected, non-multi Graphs.

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the jaccard coefficient is computed for the
        given vertex pairs. If the vertex_pair is not provided then the
        current implementation computes the jaccard coefficient for all
        adjacent vertices in the graph.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    use_weight : bool, optional (default=False)
        Flag to indicate whether to compute weighted jaccard (if use_weight==True)
        or un-weighted jaccard (if use_weight==False).
        'input_graph' must be weighted if 'use_weight=True'.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Jaccard weights. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair (will be identical to first if specified).
        df['second'] : cudf.Series
            The second vertex ID of each pair (will be identical to second if
            specified).
        df['jaccard_coeff'] : cudf.Series
            The computed Jaccard coefficient between the first and the second
            vertex ID.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import jaccard
    >>> input_graph = karate.get_graph(download=True, ignore_weights=True)
    >>> df = jaccard(input_graph)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    # Jaccard is only defined on undirected graphs in this implementation.
    if input_graph.is_directed():
        raise ValueError("Input must be an undirected Graph.")

    if vertex_pair is None:
        # Call two_hop neighbor of the entire graph
        vertex_pair = input_graph.get_two_hop_neighbors()

    # Capture the column count before renumbering; it is used below to decide
    # whether the pair columns can be renamed to the canonical first/second
    # names (2 columns == single-column vertex identifiers).
    v_p_num_col = len(vertex_pair.columns)

    if isinstance(vertex_pair, cudf.DataFrame):
        # Map external vertex IDs to the graph's internal IDs and make the
        # pair columns' dtype match the graph's vertex dtype (warns on cast).
        vertex_pair = renumber_vertex_pair(input_graph, vertex_pair)
        vertex_pair = ensure_valid_dtype(input_graph, vertex_pair)
        src_col_name = vertex_pair.columns[0]
        dst_col_name = vertex_pair.columns[1]
        first = vertex_pair[src_col_name]
        second = vertex_pair[dst_col_name]
    elif vertex_pair is not None:
        raise ValueError("vertex_pair must be a cudf Dataframe")

    # The expensive check is always disabled here; the deprecated user-facing
    # flag above only controls the warning.
    first, second, jaccard_coeff = pylibcugraph_jaccard_coefficients(
        resource_handle=ResourceHandle(),
        graph=input_graph._plc_graph,
        first=first,
        second=second,
        use_weight=use_weight,
        do_expensive_check=False,
    )

    if input_graph.renumbered:
        # Restore the caller's external vertex IDs, preserving row order so
        # the coefficient column lines up with the pairs.
        vertex_pair = input_graph.unrenumber(
            vertex_pair, src_col_name, preserve_order=True
        )
        vertex_pair = input_graph.unrenumber(
            vertex_pair, dst_col_name, preserve_order=True
        )

    if v_p_num_col == 2:
        # single column vertex
        vertex_pair = vertex_pair.rename(
            columns={src_col_name: "first", dst_col_name: "second"}
        )

    df = vertex_pair
    df["jaccard_coeff"] = cudf.Series(jaccard_coeff)

    return df
def jaccard_coefficient(
    G: Union[Graph, "networkx.Graph"],
    ebunch: Union[cudf.DataFrame, Iterable[Union[int, str, float]]] = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    NetworkX-compatible wrapper around :func:`jaccard`.

    Parameters
    ----------
    G : cugraph.Graph or NetworkX.Graph
        cuGraph or NetworkX Graph instance, should contain the connectivity
        information as an edge list. The graph should be undirected where an
        undirected edge is represented by a directed edge in both direction.
        The adjacency list will be computed if not already present.

        This implementation only supports undirected, non-multi Graphs.

    ebunch : cudf.DataFrame or iterable of node pairs, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices or iterable of 2-tuples (u, v) where u and v are nodes in
        the graph.

        If provided, the Jaccard coefficient is computed for the given vertex
        pairs. Otherwise, the current implementation computes the Jaccard
        coefficient for all adjacent vertices in the graph.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Jaccard weights, or, for a NetworkX
        input graph, a dictionary keyed by vertex pair.

        df['first'] : cudf.Series
            The first vertex ID of each pair (will be identical to first if specified).
        df['second'] : cudf.Series
            the second vertex ID of each pair (will be identical to second if
            specified).
        df['jaccard_coeff'] : cudf.Series
            The computed Jaccard coefficient between the first and the second
            vertex ID.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import jaccard_coefficient
    >>> G = karate.get_graph(download=True)
    >>> df = jaccard_coefficient(G)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    G, was_nx = ensure_cugraph_obj_for_nx(G)

    # Only materialize a pair dataframe when the caller came in through the
    # NetworkX path and actually supplied node pairs.
    pairs = cudf.DataFrame(ebunch) if (was_nx is True and ebunch is not None) else None

    result = jaccard(G, pairs)

    # NetworkX callers expect a {(u, v): coeff} dictionary, not a dataframe.
    if was_nx is True:
        result = df_edge_score_to_dictionary(
            result, k="jaccard_coeff", src="first", dst="second"
        )

    return result
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/woverlap.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.link_prediction import overlap
import cudf
import warnings
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
def overlap_w(
    input_graph: Graph,
    weights: cudf.DataFrame = None,  # deprecated, ignored
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    Compute the weighted Overlap Coefficient between each pair of vertices
    connected by an edge, or between arbitrary pairs of vertices specified by
    the user. Overlap Coefficient is defined between two sets as the ratio of
    the volume of their intersection divided by the smaller of their volumes.
    In the context of graphs, the neighborhood of a vertex is seen as a set.
    The Overlap Coefficient weight of each edge represents the strength of
    connection between vertices based on the relative similarity of their
    neighbors. If first is specified but second is not, or vice versa, an
    exception will be thrown.

    Deprecated: use ``overlap(input_graph, vertex_pair, use_weight=True)``
    instead.

    NOTE: This algorithm doesn't currently support datasets with vertices that
    are not (re)numbered vertices from 0 to V-1 where V is the total number of
    vertices as this creates isolated vertices.

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance, should contain the connectivity information
        as an edge list (edge weights are not used for this algorithm). The
        adjacency list will be computed if not already present.

    weights : cudf.DataFrame
        Deprecated and ignored: this implementation forwards to
        :func:`overlap` with ``use_weight=True``, which reads edge weights
        from ``input_graph`` instead of from this dataframe.

        weights['vertex'] : cudf.Series
            Contains the vertex identifiers
        weights['weight'] : cudf.Series
            Contains the weights of vertices

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the overlap coefficient is computed for the
        given vertex pairs, else, it is computed for all vertex pairs.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the overlap coefficients. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair.
        df['second'] : cudf.Series
            The second vertex ID of each pair.
        df['overlap_coeff'] : cudf.Series
            The computed weighted Overlap coefficient between the first and
            the second vertex ID.

    Examples
    --------
    >>> import random
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> # Create a dataframe containing the vertices with their
    >>> # corresponding weight
    >>> weights = cudf.DataFrame()
    >>> # Sample 10 random vertices from the graph and drop duplicates if
    >>> # there are any to avoid duplicates vertices with different weight
    >>> # value in the 'weights' dataframe
    >>> weights['vertex'] = G.nodes().sample(n=10).drop_duplicates()
    >>> # Reset the indices and drop the index column
    >>> weights.reset_index(inplace=True, drop=True)
    >>> # Create a weight column with random weights
    >>> weights['weight'] = [random.random() for w in range(
    ...     len(weights['vertex']))]
    >>> df = cugraph.overlap_w(G, weights)

    """
    # BUG FIX: the old message began with a stray space and recommended
    # "vertex_pair=False", which is not a valid value for that parameter
    # (it expects a cudf.DataFrame or None).
    warning_msg = (
        "overlap_w is deprecated. To compute weighted overlap, please use "
        "overlap(input_graph, vertex_pair, use_weight=True)"
    )
    warnings.warn(warning_msg, FutureWarning)
    # overlap() itself emits the deprecation warning if do_expensive_check
    # is set; forward it positionally as before.
    return overlap(input_graph, vertex_pair, do_expensive_check, use_weight=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public API of the cugraph.link_prediction subpackage.
from cugraph.utilities.api_tools import deprecated_warning_wrapper

from cugraph.link_prediction.jaccard import jaccard
from cugraph.link_prediction.jaccard import jaccard_coefficient
from cugraph.link_prediction.sorensen import sorensen
from cugraph.link_prediction.sorensen import sorensen_coefficient
from cugraph.link_prediction.overlap import overlap
from cugraph.link_prediction.overlap import overlap_coefficient

# To be deprecated
# Each *_w entry point is re-exported wrapped so that calling it emits a
# deprecation warning; the wrapped name shadows the raw import below.
from cugraph.link_prediction.wjaccard import jaccard_w

jaccard_w = deprecated_warning_wrapper(jaccard_w)

from cugraph.link_prediction.woverlap import overlap_w

overlap_w = deprecated_warning_wrapper(overlap_w)

from cugraph.link_prediction.wsorensen import sorensen_w

sorensen_w = deprecated_warning_wrapper(sorensen_w)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_prediction/sorensen.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_edge_score_to_dictionary,
renumber_vertex_pair,
)
import cudf
import warnings
from typing import Union, Iterable
from pylibcugraph import (
sorensen_coefficients as pylibcugraph_sorensen_coefficients,
)
from pylibcugraph import ResourceHandle
from cugraph.structure import Graph
from cugraph.utilities.utils import import_optional
# FIXME: the networkx.Graph type used in type annotations is specified
# using a string literal to avoid depending on and importing networkx.
# Instead, networkx is imported optionally, which may cause a problem
# for a type checker if run in an environment where networkx is not installed.
networkx = import_optional("networkx")
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, vertex_pair):
    """
    Cast ``vertex_pair`` to the graph's vertex dtype when the dtypes differ.

    The expected dtype is taken from the first column of the graph's edge
    list. A UserWarning is emitted before casting; a matching dataframe is
    returned as-is.
    """
    graph_dtype = input_graph.edgelist.edgelist_df.dtypes[0]
    pair_dtypes = vertex_pair.dtypes
    if any(dt != graph_dtype for dt in (pair_dtypes[0], pair_dtypes[1])):
        warnings.warn(
            "Sorensen requires 'vertex_pair' to match the graph's 'vertex' type. "
            f"input graph's vertex type is: {graph_dtype} and got "
            f"'vertex_pair' of type: {pair_dtypes}.",
            UserWarning,
        )
        vertex_pair = vertex_pair.astype(graph_dtype)
    return vertex_pair
def sorensen(
    input_graph: Graph,
    vertex_pair: cudf.DataFrame = None,
    do_expensive_check: bool = False,  # deprecated
    use_weight: bool = False,
):
    """
    Compute the Sorensen coefficient between each pair of vertices connected by
    an edge, or between arbitrary pairs of vertices specified by the user.
    Sorensen coefficient is defined between two sets as the ratio of twice the
    volume of their intersection divided by the volume of each set.

    If first is specified but second is not, or vice versa, an exception will
    be thrown.

    cugraph.sorensen, in the absence of a specified vertex pair list, will
    compute the two_hop_neighbors of the entire graph to construct a vertex pair
    list and will return the sorensen coefficient for those vertex pairs. This is
    not advisable as the vertex_pairs can grow exponentially with respect to the
    size of the datasets

    Parameters
    ----------
    input_graph : cugraph.Graph
        cuGraph Graph instance, should contain the connectivity information
        as an edge list. The adjacency list will be computed if not already
        present.

        This implementation only supports undirected, non-multi edge Graph.

    vertex_pair : cudf.DataFrame, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices. If provided, the Sorensen coefficient is computed for the
        given vertex pairs. If the vertex_pair is not provided then the
        current implementation computes the Sorensen coefficient for all
        adjacent vertices in the graph.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    use_weight : bool, optional (default=False)
        Flag to indicate whether to compute weighted sorensen (if use_weight==True)
        or un-weighted sorensen (if use_weight==False).
        'input_graph' must be weighted if 'use_weight=True'.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Sorensen index. The ordering is
        relative to the adjacency list, or that given by the specified vertex
        pairs.

        df['first'] : cudf.Series
            The first vertex ID of each pair (will be identical to first if specified).
        df['second'] : cudf.Series
            The second vertex ID of each pair (will be identical to second if
            specified).
        df['sorensen_coeff'] : cudf.Series
            The computed sorensen coefficient between the first and the second
            vertex ID.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import sorensen
    >>> input_graph = karate.get_graph(download=True, ignore_weights=True)
    >>> df = sorensen(input_graph)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    # Sorensen is only defined on undirected graphs in this implementation.
    if input_graph.is_directed():
        raise ValueError("Input must be an undirected Graph.")

    if vertex_pair is None:
        # Call two_hop neighbor of the entire graph
        vertex_pair = input_graph.get_two_hop_neighbors()

    # Capture the column count before renumbering; it is used below to decide
    # whether the pair columns can be renamed to the canonical first/second
    # names (2 columns == single-column vertex identifiers).
    v_p_num_col = len(vertex_pair.columns)

    if isinstance(vertex_pair, cudf.DataFrame):
        # Map external vertex IDs to the graph's internal IDs and make the
        # pair columns' dtype match the graph's vertex dtype (warns on cast).
        vertex_pair = renumber_vertex_pair(input_graph, vertex_pair)
        vertex_pair = ensure_valid_dtype(input_graph, vertex_pair)
        src_col_name = vertex_pair.columns[0]
        dst_col_name = vertex_pair.columns[1]
        first = vertex_pair[src_col_name]
        second = vertex_pair[dst_col_name]
    elif vertex_pair is not None:
        raise ValueError("vertex_pair must be a cudf dataframe")

    # The expensive check is always disabled here; the deprecated user-facing
    # flag above only controls the warning.
    first, second, sorensen_coeff = pylibcugraph_sorensen_coefficients(
        resource_handle=ResourceHandle(),
        graph=input_graph._plc_graph,
        first=first,
        second=second,
        use_weight=use_weight,
        do_expensive_check=False,
    )

    if input_graph.renumbered:
        # Restore the caller's external vertex IDs, preserving row order so
        # the coefficient column lines up with the pairs.
        vertex_pair = input_graph.unrenumber(
            vertex_pair, src_col_name, preserve_order=True
        )
        vertex_pair = input_graph.unrenumber(
            vertex_pair, dst_col_name, preserve_order=True
        )

    if v_p_num_col == 2:
        # single column vertex
        vertex_pair = vertex_pair.rename(
            columns={src_col_name: "first", dst_col_name: "second"}
        )

    df = vertex_pair
    df["sorensen_coeff"] = cudf.Series(sorensen_coeff)

    return df
def sorensen_coefficient(
    G: Union[Graph, "networkx.Graph"],
    ebunch: Union[cudf.DataFrame, Iterable[Union[int, str, float]]] = None,
    do_expensive_check: bool = False,  # deprecated
):
    """
    NetworkX-compatible wrapper around :func:`sorensen`.

    Parameters
    ----------
    G : cugraph.Graph or NetworkX.Graph
        cuGraph or NetworkX Graph instance, should contain the connectivity
        information as an edge list. The graph should be undirected where an
        undirected edge is represented by a directed edge in both direction.
        The adjacency list will be computed if not already present.

        This implementation only supports undirected, non-multi Graphs.

    ebunch : cudf.DataFrame or iterable of node pairs, optional (default=None)
        A GPU dataframe consisting of two columns representing pairs of
        vertices or iterable of 2-tuples (u, v) where u and v are nodes in
        the graph.

        If provided, the Sorensen coefficient is computed for the given
        vertex pairs. Otherwise, the current implementation computes the
        Sorensen coefficient for all adjacent vertices in the graph.

    do_expensive_check : bool, optional (default=False)
        Deprecated.
        This option added a check to ensure integer vertex IDs are sequential
        values from 0 to V-1. That check is now redundant because cugraph
        unconditionally renumbers and un-renumbers integer vertex IDs for
        optimal performance, therefore this option is deprecated and will be
        removed in a future version.

    Returns
    -------
    df : cudf.DataFrame
        GPU data frame of size E (the default) or the size of the given pairs
        (first, second) containing the Sorensen weights, or, for a NetworkX
        input graph, a dictionary keyed by vertex pair.

        df['first'] : cudf.Series
            The first vertex ID of each pair (will be identical to first if specified).
        df['second'] : cudf.Series
            The second vertex ID of each pair (will be identical to second if
            specified).
        df['sorensen_coeff'] : cudf.Series
            The computed Sorensen coefficient between the first and the second
            vertex ID.

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> from cugraph import sorensen_coefficient
    >>> G = karate.get_graph(download=True, ignore_weights=True)
    >>> df = sorensen_coefficient(G)

    """
    if do_expensive_check:
        warnings.warn(
            "do_expensive_check is deprecated since vertex IDs are no longer "
            "required to be consecutively numbered",
            FutureWarning,
        )

    G, was_nx = ensure_cugraph_obj_for_nx(G)

    # FIXME: What is the logic behind this since the docstrings mention that
    # 'G' and 'ebunch'(if not None) are respectively of type cugraph.Graph
    # and cudf.DataFrame?
    pairs = None
    if was_nx is True and ebunch is not None:
        pairs = cudf.DataFrame(ebunch)

    result = sorensen(G, pairs)

    # NetworkX callers expect a {(u, v): coeff} dictionary, not a dataframe.
    if was_nx is True:
        result = df_edge_score_to_dictionary(
            result, k="sorensen_coeff", src="first", dst="second"
        )

    return result
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_analysis/hits.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
from pylibcugraph import ResourceHandle, hits as pylibcugraph_hits
import cudf
import warnings
def hits(G, max_iter=100, tol=1.0e-5, nstart=None, normalized=True):
    """
    Compute HITS hubs and authorities values for each vertex

    The HITS algorithm computes two numbers for a node. Authorities
    estimates the node value based on the incoming links. Hubs estimates
    the node value based on outgoing links.

    Both cuGraph and networkx implementation use a 1-norm.

    Parameters
    ----------
    G : cugraph.Graph
        cuGraph graph descriptor, should contain the connectivity information
        as an edge list (edge weights are not used for this algorithm).
        The adjacency list will be computed if not already present.

    max_iter : int, optional (default=100)
        The maximum number of iterations before an answer is returned.

    tol : float, optional (default=1.0e-5)
        Set the tolerance the approximation, this parameter should be a small
        magnitude value.

    nstart : cudf.Dataframe, optional (default=None)
        The initial hubs guess vertices along with their initial hubs guess
        value

        nstart['vertex'] : cudf.Series
            Initial hubs guess vertices
        nstart['values'] : cudf.Series
            Initial hubs guess values

    normalized : bool, optional (default=True)
        A flag to normalize the results

    Returns
    -------
    HubsAndAuthorities : cudf.DataFrame
        GPU data frame containing three cudf.Series of size V: the vertex
        identifiers and the corresponding hubs values and the corresponding
        authorities values.

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['hubs'] : cudf.Series
            Contains the hubs score
        df['authorities'] : cudf.Series
            Contains the authorities score

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> hits = cugraph.hits(G, max_iter = 50)

    """
    G, isNx = ensure_cugraph_obj_for_nx(G, store_transposed=True)

    if G.store_transposed is False:
        warning_msg = (
            "HITS expects the 'store_transposed' flag "
            "to be set to 'True' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)

    do_expensive_check = False
    init_hubs_guess_vertices = None
    init_hubs_guess_values = None

    if nstart is not None:
        init_hubs_guess_vertices = nstart["vertex"]
        init_hubs_guess_values = nstart["values"]

    vertices, hubs, authorities = pylibcugraph_hits(
        resource_handle=ResourceHandle(),
        graph=G._plc_graph,
        tol=tol,
        max_iter=max_iter,
        initial_hubs_guess_vertices=init_hubs_guess_vertices,
        initial_hubs_guess_values=init_hubs_guess_values,
        normalized=normalized,
        do_expensive_check=do_expensive_check,
    )
    results = cudf.DataFrame()
    results["vertex"] = cudf.Series(vertices)
    results["hubs"] = cudf.Series(hubs)
    results["authorities"] = cudf.Series(authorities)

    # BUG FIX: map internal vertex IDs back to the caller's external IDs
    # *before* the NetworkX conversion below. The previous ordering called
    # G.unrenumber() after `results` had been replaced by the (d1, d2) tuple,
    # which is not a DataFrame and would fail (or leak internal IDs).
    if G.renumbered:
        results = G.unrenumber(results, "vertex")

    # NetworkX callers expect a (hubs_dict, authorities_dict) pair.
    if isNx is True:
        d1 = df_score_to_dictionary(results[["vertex", "hubs"]], "hubs")
        d2 = df_score_to_dictionary(results[["vertex", "authorities"]], "authorities")
        results = (d1, d2)

    return results
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_analysis/__init__.py | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.link_analysis.pagerank import pagerank
from cugraph.link_analysis.hits import hits
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/link_analysis/pagerank.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import cudf
import numpy as np
from pylibcugraph import (
pagerank as plc_pagerank,
personalized_pagerank as plc_p_pagerank,
exceptions as plc_exceptions,
ResourceHandle,
)
from cugraph.utilities import (
ensure_cugraph_obj_for_nx,
df_score_to_dictionary,
)
from cugraph.exceptions import FailedToConvergeError
def renumber_vertices(input_graph, input_df):
    """Map the external vertex ids in ``input_df`` to the graph's internal
    (renumbered) ids via ``add_internal_vertex_id``.

    For multi-column vertices every column except the last is treated as
    part of the vertex key; otherwise the single 'vertex' column is used.
    """
    multi_col = len(input_graph.renumber_map.implementation.col_names) > 1
    key_cols = input_df.columns[:-1].to_list() if multi_col else "vertex"
    return input_graph.add_internal_vertex_id(input_df, "vertex", key_cols)
# FIXME: Move this function to the utility module so that it can be
# shared by other algos
def ensure_valid_dtype(input_graph, input_df, input_df_name):
    """Cast the 'values' (if present) and 'vertex' columns of ``input_df``
    to match the graph's edge-weight and vertex dtypes, emitting a
    UserWarning for every mismatch that had to be corrected.
    """
    edgelist_df = input_graph.edgelist.edgelist_df

    if input_graph.edgelist.weights is False:
        # If the graph is not weighted, an artificial weight column
        # of type 'float32' is added and it must match the user
        # personalization/nstart values.
        edge_attr_dtype = np.float32
    else:
        edge_attr_dtype = edgelist_df["weights"].dtype

    if "values" in input_df.columns:
        values_dtype = input_df["values"].dtype
        if values_dtype != edge_attr_dtype:
            warnings.warn(
                f"PageRank requires '{input_df_name}' values "
                "to match the graph's 'edge_attr' type. "
                f"edge_attr type is: {edge_attr_dtype} and got "
                f"'{input_df_name}' values of type: "
                f"{values_dtype}.",
                UserWarning,
            )
            input_df = input_df.astype({"values": edge_attr_dtype})

    graph_vertex_dtype = edgelist_df.dtypes[0]
    df_vertex_dtype = input_df["vertex"].dtype
    if df_vertex_dtype != graph_vertex_dtype:
        warnings.warn(
            f"PageRank requires '{input_df_name}' vertex "
            "to match the graph's 'vertex' type. "
            f"input graph's vertex type is: {graph_vertex_dtype} and got "
            f"'{input_df_name}' vertex of type: "
            f"{df_vertex_dtype}.",
            UserWarning,
        )
        input_df = input_df.astype({"vertex": graph_vertex_dtype})

    return input_df
def pagerank(
    G,
    alpha=0.85,
    personalization=None,
    precomputed_vertex_out_weight=None,
    max_iter=100,
    tol=1.0e-5,
    nstart=None,
    weight=None,
    dangling=None,
    fail_on_nonconvergence=True,
):
    """Find the PageRank score for every vertex in a graph. cuGraph computes an
    approximation of the Pagerank eigenvector using the power method. The
    number of iterations depends on the properties of the network itself; it
    increases when the tolerance decreases and/or alpha increases toward the
    limiting value of 1. The user is free to use default values or to provide
    inputs for the initial guess, tolerance and maximum number of iterations.
    All edges will have an edge_attr value of 1.0 if not provided.

    Parameters
    ----------
    G : cugraph.Graph or networkx.Graph
        cuGraph graph descriptor, should contain the connectivity information
        as an edge list.
        The transposed adjacency list will be computed if not already present.

    alpha : float, optional (default=0.85)
        The damping factor alpha represents the probability to follow an
        outgoing edge, standard value is 0.85.
        Thus, 1.0-alpha is the probability to “teleport” to a random vertex.
        Alpha should be greater than 0.0 and strictly lower than 1.0.

    personalization : cudf.Dataframe, optional (default=None)
        GPU Dataframe containing the personalization information.
        (a performance optimization)

        personalization['vertex'] : cudf.Series
            Subset of vertices of graph for personalization
        personalization['values'] : cudf.Series
            Personalization values for vertices

    precomputed_vertex_out_weight : cudf.Dataframe, optional (default=None)
        GPU Dataframe containing the precomputed vertex out weight
        information(a performance optimization).

        precomputed_vertex_out_weight['vertex'] : cudf.Series
            Subset of vertices of graph for precomputed_vertex_out_weight
        precomputed_vertex_out_weight['sums'] : cudf.Series
            Corresponding precomputed sum of outgoing vertices weight

    max_iter : int, optional (default=100)
        The maximum number of iterations before an answer is returned. This can
        be used to limit the execution time and do an early exit before the
        solver reaches the convergence tolerance.
        If this value is lower or equal to 0 cuGraph will use the default
        value, which is 100.

    tol : float, optional (default=1e-05)
        Set the tolerance the approximation, this parameter should be a small
        magnitude value.
        The lower the tolerance the better the approximation. If this value is
        0.0f, cuGraph will use the default value which is 1.0E-5.
        Setting too small a tolerance can lead to non-convergence due to
        numerical roundoff. Usually values between 0.01 and 0.00001 are
        acceptable.

    nstart : cudf.Dataframe, optional (default=None)
        GPU Dataframe containing the initial guess for pagerank.
        (a performance optimization).

        nstart['vertex'] : cudf.Series
            Subset of vertices of graph for initial guess for pagerank values
        nstart['values'] : cudf.Series
            Pagerank values for vertices

    weight: str, optional (default=None)
        The attribute column to be used as edge weights if Graph is a NetworkX
        Graph. This parameter is here for NetworkX compatibility and is ignored
        in case of a cugraph.Graph

    dangling : dict, optional (default=None)
        This parameter is here for NetworkX compatibility and ignored

    fail_on_nonconvergence : bool (default=True)
        If the solver does not reach convergence, raise an exception if
        fail_on_nonconvergence is True. If fail_on_nonconvergence is False,
        the return value is a tuple of (pagerank, converged) where pagerank is
        a cudf.DataFrame as described below, and converged is a boolean
        indicating if the solver converged (True) or not (False).

    Returns
    -------
    The return value varies based on the value of the fail_on_nonconvergence
    parameter. If fail_on_nonconvergence is True:

    PageRank : cudf.DataFrame
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding PageRank values.

        NOTE: if the input cugraph.Graph was created using the renumber=False
        option of any of the from_*_edgelist() methods, pagerank assumes that
        the vertices in the edgelist are contiguous and start from 0.
        If the actual set of vertices in the edgelist is not
        contiguous (has gaps) or does not start from zero, pagerank will assume
        the "missing" vertices are isolated vertices in the graph, and will
        compute and return pagerank values for each. If this is not the desired
        behavior, ensure the input cugraph.Graph is created from the
        from_*_edgelist() functions with the renumber=True option (the default)

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df['pagerank'] : cudf.Series
            Contains the PageRank score

    If fail_on_nonconvergence is False:

    (PageRank, converged) : tuple of (cudf.DataFrame, bool)
        PageRank is the GPU dataframe described above, converged is a bool
        indicating if the solver converged (True) or not (False).

    Examples
    --------
    >>> from cugraph.datasets import karate
    >>> G = karate.get_graph(download=True)
    >>> pr = cugraph.pagerank(G, alpha = 0.85, max_iter = 500, tol = 1.0e-05)

    """
    # Optional PLC inputs; remain None unless nstart /
    # precomputed_vertex_out_weight are supplied below.
    initial_guess_vertices = None
    initial_guess_values = None
    pre_vtx_o_wgt_vertices = None
    pre_vtx_o_wgt_sums = None

    # Converts a NetworkX graph to a cugraph.Graph when needed; isNx records
    # whether the result must be converted back to a dict at the end.
    G, isNx = ensure_cugraph_obj_for_nx(G, weight, store_transposed=True)

    if G.store_transposed is False:
        warning_msg = (
            "Pagerank expects the 'store_transposed' flag "
            "to be set to 'True' for optimal performance during "
            "the graph creation"
        )
        warnings.warn(warning_msg, UserWarning)

    do_expensive_check = False

    if nstart is not None:
        # Translate user-supplied vertex ids to internal ids and align dtypes
        # with the graph before handing the initial guess to PLC.
        if G.renumbered is True:
            nstart = renumber_vertices(G, nstart)
        nstart = ensure_valid_dtype(G, nstart, "nstart")
        initial_guess_vertices = nstart["vertex"]
        initial_guess_values = nstart["values"]

    if precomputed_vertex_out_weight is not None:
        # Same renumbering/dtype treatment for the precomputed out-weights.
        if G.renumbered is True:
            precomputed_vertex_out_weight = renumber_vertices(
                G, precomputed_vertex_out_weight
            )
        precomputed_vertex_out_weight = ensure_valid_dtype(
            G, precomputed_vertex_out_weight, "precomputed_vertex_out_weight"
        )
        pre_vtx_o_wgt_vertices = precomputed_vertex_out_weight["vertex"]
        pre_vtx_o_wgt_sums = precomputed_vertex_out_weight["sums"]

    try:
        if personalization is not None:
            if not isinstance(personalization, cudf.DataFrame):
                raise NotImplementedError(
                    "personalization other than a cudf dataframe currently not "
                    "supported"
                )
            if G.renumbered is True:
                personalization = renumber_vertices(G, personalization)

            personalization = ensure_valid_dtype(G, personalization, "personalization")

            # Personalized variant of pagerank.
            # result_tuple is (vertices, values[, converged]) — the third
            # element is present when fail_on_nonconvergence is False.
            result_tuple = plc_p_pagerank(
                resource_handle=ResourceHandle(),
                graph=G._plc_graph,
                precomputed_vertex_out_weight_vertices=pre_vtx_o_wgt_vertices,
                precomputed_vertex_out_weight_sums=pre_vtx_o_wgt_sums,
                personalization_vertices=personalization["vertex"],
                personalization_values=personalization["values"],
                initial_guess_vertices=initial_guess_vertices,
                initial_guess_values=initial_guess_values,
                alpha=alpha,
                epsilon=tol,
                max_iterations=max_iter,
                do_expensive_check=do_expensive_check,
                fail_on_nonconvergence=fail_on_nonconvergence,
            )
        else:
            # Standard (non-personalized) pagerank.
            result_tuple = plc_pagerank(
                resource_handle=ResourceHandle(),
                graph=G._plc_graph,
                precomputed_vertex_out_weight_vertices=pre_vtx_o_wgt_vertices,
                precomputed_vertex_out_weight_sums=pre_vtx_o_wgt_sums,
                initial_guess_vertices=initial_guess_vertices,
                initial_guess_values=initial_guess_values,
                alpha=alpha,
                epsilon=tol,
                max_iterations=max_iter,
                do_expensive_check=do_expensive_check,
                fail_on_nonconvergence=fail_on_nonconvergence,
            )
    # Re-raise this as a cugraph exception so users trying to catch this do not
    # have to know to import another package.
    except plc_exceptions.FailedToConvergeError as exc:
        raise FailedToConvergeError from exc

    df = cudf.DataFrame()
    df["vertex"] = result_tuple[0]
    df["pagerank"] = result_tuple[1]

    if G.renumbered:
        df = G.unrenumber(df, "vertex")

    if isNx is True:
        df = df_score_to_dictionary(df, "pagerank")

    if fail_on_nonconvergence:
        return df
    else:
        # result_tuple[2] is the 'converged' bool returned by PLC.
        return (df, result_tuple[2])
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/pointer_utils.pyx | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport uintptr_t
'''
cdef extern from "cugraph.h" namespace "cugraph":
cdef int get_device(void *ptr)
def device_of_gpu_pointer(g):
cdef uintptr_t cptr = g.device_ctypes_pointer.value
return get_device(<void*> cptr)
'''
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython translation units in this directory to compile into extension modules.
set(cython_sources path_retrieval_wrapper.pyx)
# Each generated module links against the core libcugraph C++ target.
set(linked_libraries cugraph::cugraph)
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX utilities_
  ASSOCIATED_TARGETS cugraph
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/nx_factory.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities specific to converting to/from NetworkX.
NetworkX is required at runtime in order to call any of these functions, so
ensure code using these utilities has done the proper checks prior to calling.
"""
import cugraph
from .utils import import_optional
import cudf
from cudf import from_pandas
from cudf.api.types import is_integer_dtype
# nx will be a MissingModule instance if NetworkX is not installed (any
# attribute access on a MissingModule instance results in a RuntimeError).
nx = import_optional("networkx")
def convert_unweighted_to_gdf(NX_G, vertex_type="int32"):
    """Build a cudf edge list ('src'/'dst') from an unweighted NetworkX
    graph, casting integer vertex columns to ``vertex_type``."""
    sources = []
    destinations = []
    for u, v in NX_G.edges(data=False):
        sources.append(u)
        destinations.append(v)

    gdf = cudf.DataFrame()
    gdf["src"] = cudf.Series(sources)
    gdf["dst"] = cudf.Series(destinations)

    if is_integer_dtype(gdf["src"]) or is_integer_dtype(gdf["dst"]):
        gdf = gdf.astype(vertex_type)
    return gdf
def convert_weighted_named_to_gdf(NX_G, weight, vertex_type="int32"):
    """Build a cudf edge list ('src'/'dst'/'weight') from a NetworkX graph
    whose edge weights live under the attribute name ``weight``."""
    sources, destinations, weights = [], [], []
    for u, v, w in NX_G.edges(data=weight):
        sources.append(u)
        destinations.append(v)
        weights.append(w)

    gdf = cudf.DataFrame()
    gdf["src"] = cudf.Series(sources)
    gdf["dst"] = cudf.Series(destinations)
    gdf["weight"] = weights

    if is_integer_dtype(gdf["src"]) or is_integer_dtype(gdf["dst"]):
        gdf = gdf.astype({"src": vertex_type, "dst": vertex_type})

    # FIXME: The weight dtype is hardcoded.
    gdf = gdf.astype({"weight": "float32"})
    return gdf
def convert_weighted_unnamed_to_gdf(NX_G, vertex_type="int32"):
    """Build a cudf edge list ('source'/'target'/'weight') from a weighted
    NetworkX graph whose single weight attribute has an unknown name.

    Raises
    ------
    ValueError
        If the edge list does not contain exactly one non-vertex column.
    """
    _pdf = nx.to_pandas_edgelist(NX_G)
    nx_col = ["source", "target"]
    wt_col = [col for col in _pdf.columns if col not in nx_col]
    if len(wt_col) != 1:
        raise ValueError("Unable to determine weight column name")
    if wt_col[0] != "weight":
        # BUG FIX: DataFrame.rename returns a new frame; the original code
        # discarded the result, so the weight column was never renamed.
        _pdf = _pdf.rename(columns={wt_col[0]: "weight"})
    _gdf = from_pandas(_pdf)
    if is_integer_dtype(_gdf["source"]) or is_integer_dtype(_gdf["target"]):
        _gdf = _gdf.astype({"source": vertex_type, "target": vertex_type})
    return _gdf
def convert_from_nx(
    nxG, weight=None, do_renumber=True, store_transposed=False, vertex_type="int32"
):
    """
    Convert a NetworkX Graph into a cuGraph Graph.

    The data takes a detour through Pandas on the way in, so this is not
    necessarily the most efficient conversion path.

    Parameters
    ----------
    nxG : NetworkX Graph
        The NetworkX Graph to be converted.
    weight : str or None
        The weight column name. If the graph is weighted this
        identifies which column in the Nx data to extract.
    do_renumber : boolean, default is True
        Should the data be renumbered.
    store_transposed : boolean, default is False
        Should the cuGraph Graph store the transpose of the graph.
    vertex_type : str, default is "int32"
        Vertex type.

    Returns
    -------
    G : cuGraph Graph
    """
    if isinstance(nxG, nx.classes.digraph.DiGraph):
        G = cugraph.Graph(directed=True)
    elif isinstance(nxG, nx.classes.graph.Graph):
        G = cugraph.Graph()
    else:
        raise TypeError(
            f"nxG must be either a NetworkX Graph or DiGraph, got {type(nxG)}"
        )

    # Select the conversion helper; each one fixes the resulting column
    # names and whether a weight column is present.
    if not nx.is_weighted(nxG, weight=weight):
        gdf = convert_unweighted_to_gdf(nxG, vertex_type)
        src_name, dst_name, attr_name = "src", "dst", None
    elif weight is None:
        gdf = convert_weighted_unnamed_to_gdf(nxG, vertex_type)
        src_name, dst_name, attr_name = "source", "target", "weight"
    else:
        gdf = convert_weighted_named_to_gdf(nxG, weight, vertex_type)
        src_name, dst_name, attr_name = "src", "dst", "weight"

    G.from_cudf_edgelist(
        gdf,
        source=src_name,
        destination=dst_name,
        edge_attr=attr_name,
        renumber=do_renumber,
        store_transposed=store_transposed,
    )
    return G
def df_score_to_dictionary(df, k, v="vertex"):
    """
    Convert a per-vertex score dataframe to a {vertex: score} dictionary.

    Parameters
    ----------
    df : cudf.DataFrame
        GPU data frame containing two cudf.Series of size V: the vertex
        identifiers and the corresponding score values.
        Please note that the resulting the 'vertex' column might not be
        in ascending order.

        df['vertex'] : cudf.Series
            Contains the vertex identifiers
        df[..] : cudf.Series
            Contains the scores of the vertices

    k : str
        score column name
    v : str
        the vertex column name. Default is "vertex"

    Returns
    -------
    dict : Dictionary of vertices and score
    """
    ordered = df.sort_values(by=v)
    return ordered.to_pandas().set_index(v).to_dict()[k]
def df_edge_score_to_dictionary(df, k, src="src", dst="dst"):
    """
    Convert an edge-score dataframe to a {(src, dst): score} dictionary.

    Parameters
    ----------
    df : cudf.DataFrame
        GPU data frame containing the source/destination vertex identifier
        columns and the corresponding score column.

        df[src] : cudf.Series
            Contains the source vertex identifiers
        df[dst] : cudf.Series
            Contains the destination vertex identifiers
        df[k] : cudf.Series
            Contains the scores of the edges

    k : str
        score column name
    src : str
        source column name
    dst : str
        destination column name

    Returns
    -------
    dict : Dictionary keyed by (source, destination) pairs with score values
    """
    pdf = df.sort_values(by=[src, dst]).to_pandas()
    # Iterate positionally over .values: the original per-row pdf[src][i]
    # lookups were label-based (sort_values keeps the old index labels) and
    # cost an index lookup per access; zipping the column arrays is both
    # correct with respect to the sorted order and O(n).
    return {
        (s, d): score
        for s, d, score in zip(pdf[src].values, pdf[dst].values, pdf[k].values)
    }
def cugraph_to_nx(G):
    """Convert a cuGraph Graph to a NetworkX graph via a pandas edge list,
    carrying the weight column over as an edge attribute when present."""
    pdf = G.view_edge_list().to_pandas()
    src_cols = G.source_columns
    dst_cols = G.destination_columns

    # Two columns means src/dst only (unweighted); otherwise attach weights.
    if len(pdf.columns) == 2:
        return nx.from_pandas_edgelist(pdf, source=src_cols, target=dst_cols)
    return nx.from_pandas_edgelist(
        pdf, source=src_cols, target=dst_cols, edge_attr=G.weight_column
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/path_retrieval.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
from cugraph.structure.symmetrize import symmetrize
from cugraph.structure.number_map import NumberMap
from cugraph.utilities import path_retrieval_wrapper
def get_traversed_cost(df, source, source_col, dest_col, value_col):
    """
    Take the DataFrame result from a BFS or SSSP function call and sums
    the given weights along the path to the starting vertex.
    The source_col, dest_col identifiers need to match with the vertex and
    predecessor columns of df.

    Parameters
    ----------
    df : cudf.DataFrame
        The dataframe containing the results of a BFS or SSSP call
    source : int
        Index of the source vertex.
    source_col : cudf.Series
        This cudf.Series wraps a gdf_column of size E (E: number of edges).
        The gdf column contains the source index for each edge.
        Source indices must be an integer type.
    dest_col : cudf.Series
        This cudf.Series wraps a gdf_column of size E (E: number of edges).
        The gdf column contains the destination index for each edge.
        Destination indices must be an integer type.
    value_col : cudf.Series
        This cudf.Series wraps a gdf_column of size E (E: number of edges).
        The gdf column contains values associated with this edge.
        Weight should be a floating type.

    Returns
    -------
    df : cudf.DataFrame
        DataFrame containing two columns 'vertex' and 'info'.
        Unreachable vertices will have value the max value of the weight type.
    """

    if "vertex" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'vertex' column missing"
        )
    if "distance" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'distance' column missing"
        )
    if "predecessor" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'predecessor' column missing"
        )

    # Symmetrize the edge list so the (vertex, predecessor) merge below finds
    # the traversed edge regardless of the direction it was stored in.
    src, dst, val = symmetrize(source_col, dest_col, value_col)

    symmetrized_df = cudf.DataFrame()
    symmetrized_df["source"] = src
    symmetrized_df["destination"] = dst
    symmetrized_df["weights"] = val

    # Attach to each result row the weight of its (vertex, predecessor) edge;
    # rows with no such edge (e.g. the source, unreachable vertices) get NaN.
    input_df = df.merge(
        symmetrized_df,
        left_on=["vertex", "predecessor"],
        right_on=["source", "destination"],
        how="left",
    )

    # Set unreachable vertex weights to max float and source vertex weight to 0
    max_val = np.finfo(val.dtype).max
    input_df[["weights"]] = input_df[["weights"]].fillna(max_val)
    input_df.loc[input_df["vertex"] == source, "weights"] = 0

    # Renumber
    renumbered_gdf, renumber_map = NumberMap.renumber(
        input_df, ["vertex"], ["predecessor"], preserve_order=True
    )
    renumbered_gdf = renumbered_gdf.rename(
        columns={"src": "vertex", "dst": "predecessor"}
    )
    # -1 marks "no predecessor"; translate it to its internal id so the
    # C++ kernel recognizes the stop condition.
    stop_vertex = renumber_map.to_internal_vertex_id(cudf.Series(-1)).values[0]

    out_df = path_retrieval_wrapper.get_traversed_cost(renumbered_gdf, stop_vertex)

    # Unrenumber
    out_df["vertex"] = renumber_map.unrenumber(
        renumbered_gdf, "vertex", preserve_order=True
    )["vertex"]
    return out_df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/path_retrieval.pxd | # Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.structure.graph_primtypes cimport *
# Declaration of the templated C++ path-retrieval utility. For each vertex i,
# the implementation accumulates info_weights along the predecessor chain into
# out[i], stopping when 'stop_vertex' is reached.
cdef extern from "cugraph/utilities/path_retrieval.hpp" namespace "cugraph":
    cdef void get_traversed_cost[vertex_t, weight_t](const handle_t &handle,
                                                     const vertex_t *vertices,
                                                     const vertex_t *preds,
                                                     const weight_t *info_weights,
                                                     weight_t *out,
                                                     vertex_t stop_vertex,
                                                     vertex_t num_vertices) except +
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/grmat.py | # Copyright (c) 2019-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.utilities import grmat_wrapper
def grmat_gen(argv):
    """Generate an R-MAT graph via the grmat wrapper.

    Returns the (vertices, edges, source_col, dest_col) tuple produced by
    ``grmat_wrapper.grmat_gen``.
    """
    return grmat_wrapper.grmat_gen(argv)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/api_tools.py | # Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pylibcugraph.utilities.api_tools as api_tools
experimental_prefix = "EXPERIMENTAL"
def experimental_warning_wrapper(obj):
    """Return *obj* wrapped so that use emits an experimental-API warning.

    Thin delegation to pylibcugraph's implementation.
    """
    wrapped = api_tools.experimental_warning_wrapper(obj)
    return wrapped
def promoted_experimental_warning_wrapper(obj):
    """Return *obj* wrapped to warn that it was promoted out of experimental.

    Thin delegation to pylibcugraph's implementation.
    """
    wrapped = api_tools.promoted_experimental_warning_wrapper(obj)
    return wrapped
def deprecated_warning_wrapper(obj):
    """Return *obj* wrapped so that use emits a deprecation warning.

    Thin delegation to pylibcugraph's implementation.
    """
    wrapped = api_tools.deprecated_warning_wrapper(obj)
    return wrapped
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx | # Copyright (c) 2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.utilities.path_retrieval cimport get_traversed_cost as c_get_traversed_cost
from cugraph.structure.graph_primtypes cimport *
from libc.stdint cimport uintptr_t
from numba import cuda
import cudf
import numpy as np
def get_traversed_cost(input_df, stop_vertex):
    """
    Call get_traversed_cost

    input_df must hold 'vertex', 'predecessor' and 'weights' columns
    (already renumbered); stop_vertex is the internal id that terminates a
    predecessor chain. Returns a DataFrame with 'vertex' and the accumulated
    path cost in 'info'.
    """
    num_verts = input_df.shape[0]
    vertex_t = input_df.vertex.dtype
    weight_t = input_df.weights.dtype

    # Output frame: 'info' is zero-initialized and filled by the C++ kernel.
    df = cudf.DataFrame()
    df['vertex'] = input_df['vertex']
    df['info'] = cudf.Series(np.zeros(num_verts, dtype=weight_t))

    cdef unique_ptr[handle_t] handle_ptr
    handle_ptr.reset(new handle_t())
    handle_ = handle_ptr.get();

    # Raw device pointers obtained through __cuda_array_interface__; the
    # columns must stay alive for the duration of the C++ call.
    cdef uintptr_t vertices = <uintptr_t>NULL
    cdef uintptr_t preds = <uintptr_t>NULL
    cdef uintptr_t out = <uintptr_t>NULL
    cdef uintptr_t info_weights = <uintptr_t>NULL

    vertices = input_df['vertex'].__cuda_array_interface__['data'][0]
    preds = input_df['predecessor'].__cuda_array_interface__['data'][0]
    info_weights = input_df['weights'].__cuda_array_interface__['data'][0]
    out = df['info'].__cuda_array_interface__['data'][0]

    # Dispatch on the weight dtype; only float32/float64 are supported.
    if weight_t == np.float32:
        c_get_traversed_cost(handle_[0],
                             <int *> vertices,
                             <int *> preds,
                             <float *> info_weights,
                             <float *> out,
                             <int> stop_vertex,
                             <int> num_verts)
    elif weight_t == np.float64:
        c_get_traversed_cost(handle_[0],
                             <int *> vertices,
                             <int *> preds,
                             <double *> info_weights,
                             <double *> out,
                             <int> stop_vertex,
                             <int> num_verts)
    else:
        raise NotImplementedError

    return df
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/__init__.py | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from cugraph.utilities.grmat import grmat_gen
# from cugraph.utilities.pointer_utils import device_of_gpu_pointer
from cugraph.utilities.nx_factory import convert_from_nx
from cugraph.utilities.nx_factory import df_score_to_dictionary
from cugraph.utilities.nx_factory import df_edge_score_to_dictionary
from cugraph.utilities.nx_factory import cugraph_to_nx
from cugraph.utilities.utils import (
import_optional,
ensure_cugraph_obj,
ensure_cugraph_obj_for_nx,
is_matrix_type,
is_cp_matrix_type,
is_sp_matrix_type,
is_nx_graph_type,
renumber_vertex_pair,
cupy_package,
)
from cugraph.utilities.path_retrieval import get_traversed_cost
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/utilities/utils.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import shutil
from numba import cuda
import cudf
from cudf.core.column import as_column
from cuda.cudart import cudaDeviceAttr
from rmm._cuda.gpu import getDeviceAttribute
# optional dependencies
# Each optional package follows the same pattern: try the import, record the
# relevant sparse-matrix classes, and on ModuleNotFoundError fall back to
# None/empty lists so is_*_matrix_type() style checks degrade gracefully.
try:
    import cupy as cp
    from cupyx.scipy.sparse import coo_matrix as cp_coo_matrix
    from cupyx.scipy.sparse import csr_matrix as cp_csr_matrix
    from cupyx.scipy.sparse import csc_matrix as cp_csc_matrix

    __cp_matrix_types = [cp_coo_matrix, cp_csr_matrix, cp_csc_matrix]
    __cp_compressed_matrix_types = [cp_csr_matrix, cp_csc_matrix]
except ModuleNotFoundError:
    cp = None
    __cp_matrix_types = []
    __cp_compressed_matrix_types = []

# Module handle (or None) re-exported for callers that need to test presence.
cupy_package = cp

try:
    import scipy as sp
    from scipy.sparse import coo_matrix as sp_coo_matrix
    from scipy.sparse import csr_matrix as sp_csr_matrix
    from scipy.sparse import csc_matrix as sp_csc_matrix

    __sp_matrix_types = [sp_coo_matrix, sp_csr_matrix, sp_csc_matrix]
    __sp_compressed_matrix_types = [sp_csr_matrix, sp_csc_matrix]
except ModuleNotFoundError:
    sp = None
    __sp_matrix_types = []
    __sp_compressed_matrix_types = []

scipy_package = sp

try:
    import networkx as nx

    __nx_graph_types = [nx.Graph, nx.DiGraph]
except ModuleNotFoundError:
    nx = None
    __nx_graph_types = []

nx_package = nx
def get_traversed_path(df, id):
    """
    Take the DataFrame result from a BFS or SSSP function call and extract
    the path to a specified vertex.

    Parameters
    ----------
    df : cudf.DataFrame
        The dataframe containing the results of a BFS or SSSP call

    id : vertex ID
        must be the same data type as what is in the dataframe

    Returns
    -------
    df : cudf.DataFrame
        a dataframe containing the path steps

    Examples
    --------
    >>> gdf = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                     dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(gdf, source='0', destination='1', edge_attr='2')
    >>> sssp_df = cugraph.sssp(G, 1)
    >>> path = cugraph.utils.get_traversed_path(sssp_df, 32)
    """
    if "vertex" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'vertex' column missing"
        )
    if "distance" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'distance' column missing"
        )
    if "predecessor" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'predecessor' column missing"
        )
    # BUG FIX: the type check was inverted — it raised when the types DID
    # match. Raise only when 'id' is NOT the same type as the vertex column.
    if not isinstance(id, type(df["vertex"].iloc[0])):
        raise ValueError("The vertex 'id' needs to be the same as df['vertex']")
    # There is no guarantee that the dataframe has not been filtered
    # or edited. Therefore we cannot assume that using the vertex ID
    # as an index will work
    ddf = df[df["vertex"] == id]
    if len(ddf) == 0:
        # BUG FIX: was a malformed multi-argument ValueError
        raise ValueError(f"The vertex {id} is not in the result set")
    pred = ddf["predecessor"].iloc[0]
    # Walk the predecessor chain back to the source (predecessor == -1),
    # collecting the matching row at every hop.
    answer = [ddf]
    while pred != -1:
        ddf = df[df["vertex"] == pred]
        pred = ddf["predecessor"].iloc[0]
        answer.append(ddf)
    return cudf.concat(answer)
def get_traversed_path_list(df, id):
    """
    Take the DataFrame result from a BFS or SSSP function call and extract
    the path to a specified vertex as a series of steps.

    Parameters
    ----------
    df : cudf.DataFrame
        The dataframe containing the results of a BFS or SSSP call

    id : Int
        The vertex ID

    Returns
    -------
    a : Python array
        an ordered array containing the steps from id to root

    Examples
    --------
    >>> gdf = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                     dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(gdf, source='0', destination='1', edge_attr='2')
    >>> sssp_df = cugraph.sssp(G, 1)
    >>> path = cugraph.utils.get_traversed_path_list(sssp_df, 32)
    """
    if "vertex" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'vertex' column missing"
        )
    if "distance" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'distance' column missing"
        )
    if "predecessor" not in df.columns:
        raise ValueError(
            "DataFrame does not appear to be a BFS or "
            "SSP result - 'predecessor' column missing"
        )
    # BUG FIX: the type check was inverted — it raised when the types DID
    # match. Raise only when 'id' is NOT the same type as the vertex column.
    if not isinstance(id, type(df["vertex"].iloc[0])):
        raise ValueError("The vertex 'id' needs to be the same as df['vertex']")
    # There is no guarantee that the dataframe has not been filtered
    # or edited. Therefore we cannot assume that using the vertex ID
    # as an index will work
    answer = [id]
    ddf = df[df["vertex"] == id]
    if len(ddf) == 0:
        # BUG FIX: was a malformed multi-argument ValueError
        raise ValueError(f"The vertex {id} is not in the result set")
    pred = ddf["predecessor"].iloc[0]
    # Walk the predecessor chain back to the source (predecessor == -1).
    while pred != -1:
        answer.append(pred)
        ddf = df[df["vertex"] == pred]
        pred = ddf["predecessor"].iloc[0]
    return answer
def is_cuda_version_less_than(min_version=(10, 2)):
    """
    Return True if the CUDA runtime version in use is older than min_version.
    """
    # cuda.runtime.get_version() returns a (<major>, <minor>) tuple, so a
    # lexicographic tuple comparison is equivalent to comparing the major
    # number first and the minor number second.
    return tuple(cuda.runtime.get_version()) < tuple(min_version)
def is_device_version_less_than(min_version=(7, 0)):
    """
    Return True if the compute capability of device 0 is lower than
    min_version.
    """
    major = getDeviceAttribute(cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0)
    minor = getDeviceAttribute(cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0)
    # Lexicographic tuple ordering matches a major-then-minor comparison.
    return (major, minor) < tuple(min_version)
def get_device_memory_info():
    """
    Return the total amount of global memory on the device, in bytes.
    """
    # get_memory_info() returns a (free, total) pair; total is element 1.
    return cuda.current_context().get_memory_info()[1]
# FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
# set. An additional optional parameter for the weight attr name when accepting
# Nx graphs may be needed. From the Nx docs:
# | Many NetworkX algorithms designed for weighted graphs use
# | an edge attribute (by default `weight`) to hold a numerical value.
def ensure_cugraph_obj(obj, nx_weight_attr=None, matrix_graph_type=None):
    """
    Convert the input obj - if possible - to a cuGraph Graph-type obj (Graph,
    etc.) and return a tuple of (cugraph Graph-type obj, original
    input obj type). If matrix_graph_type is specified, it is used as the
    cugraph Graph-type obj to create when converting from a matrix type.

    Accepted input types: cugraph Graph/MultiGraph (returned as-is),
    networkx Graph/DiGraph, and cupy/scipy sparse coo/csr/csc matrices.
    """
    # FIXME: importing here to avoid circular import
    from cugraph.structure import Graph
    from cugraph.utilities.nx_factory import convert_from_nx
    input_type = type(obj)
    # cugraph graphs pass through untouched.
    if is_cugraph_graph_type(input_type):
        return (obj, input_type)
    # networkx graphs are converted via the nx_factory helper.
    elif is_nx_graph_type(input_type):
        return (convert_from_nx(obj, weight=nx_weight_attr), input_type)
    # cupy/scipy sparse matrices become an edge-list-backed Graph.
    elif (input_type in __cp_matrix_types) or (input_type in __sp_matrix_types):
        if matrix_graph_type is None:
            matrix_graph_type = Graph
        elif matrix_graph_type not in [Graph]:
            # NOTE(review): matrix_graph_type is normally a class, so this
            # isinstance() check only passes when a pre-constructed Graph
            # *instance* was given (reused below) — confirm this is intended.
            if not isinstance(matrix_graph_type, Graph):
                raise TypeError(
                    f"matrix_graph_type must be either a cugraph "
                    f"Graph, got: {matrix_graph_type}"
                )
        # csr/csc must be converted to coo to expose row/col/data arrays.
        if input_type in (__cp_compressed_matrix_types + __sp_compressed_matrix_types):
            coo = obj.tocoo(copy=False)
        else:
            coo = obj
        # cupy data may be non-contiguous; scipy arrays are host arrays and
        # can be handed to cudf directly.
        if input_type in __cp_matrix_types:
            df = cudf.DataFrame(
                {
                    "source": cp.ascontiguousarray(coo.row),
                    "destination": cp.ascontiguousarray(coo.col),
                    "weight": cp.ascontiguousarray(coo.data),
                }
            )
        else:
            df = cudf.DataFrame(
                {"source": coo.row, "destination": coo.col, "weight": coo.data}
            )
        # FIXME:
        # * do a quick check that symmetry is stored explicitly in the cupy
        #   data for sym matrices (ie. for each uv, check vu is there)
        # * populate the cugraph graph with directed data and set renumbering
        #   to false in from edge list call.
        # Reuse a passed-in Graph instance, otherwise instantiate the class.
        if isinstance(matrix_graph_type, Graph):
            G = matrix_graph_type
        else:
            G = matrix_graph_type()
        G.from_cudf_edgelist(df, edge_attr="weight", renumber=True)
        return (G, input_type)
    else:
        raise TypeError(f"obj of type {input_type} is not supported.")
# FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if
# set. An additional optional parameter for the weight attr name when accepting
# Nx graphs may be needed. From the Nx docs:
# | Many NetworkX algorithms designed for weighted graphs use
# | an edge attribute (by default `weight`) to hold a numerical value.
def ensure_cugraph_obj_for_nx(
    obj, nx_weight_attr="weight", store_transposed=False, vertex_type="int32"
):
    """
    Ensure a cuGraph Graph-type obj is returned for either cuGraph or Nx
    Graph-type inputs.

    Returns a (graph, is_nx) tuple where is_nx is True only when the input
    was a NetworkX graph and had to be converted.
    """
    # FIXME: importing here to avoid circular import
    from cugraph.utilities.nx_factory import convert_from_nx
    obj_type = type(obj)
    # cugraph graphs need no conversion.
    if is_cugraph_graph_type(obj_type):
        return (obj, False)
    # networkx graphs are converted to a cugraph Graph.
    if is_nx_graph_type(obj_type):
        converted = convert_from_nx(
            obj,
            weight=nx_weight_attr,
            store_transposed=store_transposed,
            vertex_type=vertex_type,
        )
        return (converted, True)
    raise TypeError(
        "input must be either a cuGraph or NetworkX graph " f"type, got {obj_type}"
    )
def is_cp_matrix_type(m):
    """Return True if m is one of the supported cupy sparse matrix types."""
    return any(m is mat_type for mat_type in __cp_matrix_types)
def is_sp_matrix_type(m):
    """Return True if m is one of the supported scipy sparse matrix types."""
    return any(m is mat_type for mat_type in __sp_matrix_types)
def is_matrix_type(m):
    """Return True if m is any supported cupy or scipy sparse matrix type."""
    return any((is_cp_matrix_type(m), is_sp_matrix_type(m)))
def is_nx_graph_type(graph_type):
    """Return True if graph_type is a supported networkx graph class."""
    return any(graph_type is klass for klass in __nx_graph_types)
def is_cugraph_graph_type(g):
    """Return True if g is the cugraph Graph or MultiGraph class."""
    # FIXME: importing here to avoid circular import
    from cugraph.structure import Graph, MultiGraph
    return g in (Graph, MultiGraph)
def renumber_vertex_pair(input_graph, vertex_pair):
    """
    Map the external vertex ids in vertex_pair to the graph's internal ids,
    when input_graph was renumbered; otherwise return vertex_pair unchanged.

    For single-column vertices each column is renumbered in place (same
    column name). For multi-column vertices the first vertex_column_size()
    columns are treated as the source key and the rest as the destination
    key, producing "src" and "dst" internal-id columns.
    """
    vertex_size = input_graph.vertex_column_size()
    columns = vertex_pair.columns.to_list()
    if vertex_size == 1:
        for col in vertex_pair.columns:
            if input_graph.renumbered:
                # Replace each external-id column with its internal id,
                # keeping the original column name.
                vertex_pair = input_graph.add_internal_vertex_id(vertex_pair, col, col)
    else:
        if input_graph.renumbered:
            # Multi-column vertices: split the columns into the source key
            # (first vertex_size columns) and the destination key (the rest).
            vertex_pair = input_graph.add_internal_vertex_id(
                vertex_pair, "src", columns[:vertex_size]
            )
            vertex_pair = input_graph.add_internal_vertex_id(
                vertex_pair, "dst", columns[vertex_size:]
            )
    return vertex_pair
class MissingModule:
    """
    Stand-in object for an optional module that could not be imported.

    Any attribute access on an instance raises RuntimeError, so code that
    never touches the missing module keeps working while code paths that do
    use it fail with a clear message. Instances of this class are returned
    by import_optional() when a module cannot be found.
    """

    def __init__(self, mod_name):
        # The only real attribute; everything else goes through __getattr__.
        self.name = mod_name

    def __getattr__(self, attr):
        raise RuntimeError(f"This feature requires the {self.name} package/module")
def import_optional(mod, default_mod_class=MissingModule):
    """
    Import the "optional" module named mod and return the module object.

    If the import raises ModuleNotFoundError, an instance of
    default_mod_class (constructed with mod_name=mod) is returned instead.
    This supports "optional" dependencies: code can run even when the
    dependency is not installed, and only the code paths that actually use
    the module are affected.

    Example
    -------
    >> from cugraph.utils import import_optional
    >> nx = import_optional("networkx")  # networkx is not installed
    >> G = nx.Graph()
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      ...
    RuntimeError: This feature requires the networkx package/module

    Example
    -------
    >> class CuDFFallback:
    ..   def __init__(self, mod_name):
    ..     assert mod_name == "cudf"
    ..     warnings.warn("cudf could not be imported, using pandas instead!")
    ..   def __getattr__(self, attr):
    ..     import pandas
    ..     return getattr(pandas, attr)
    ...
    >> from cugraph.utils import import_optional
    >> df_mod = import_optional("cudf", default_mod_class=CuDFFallback)
    <stdin>:4: UserWarning: cudf could not be imported, using pandas instead!
    >> df = df_mod.DataFrame()
    >> type(df)
    <class 'pandas.core.frame.DataFrame'>
    """
    try:
        module = importlib.import_module(mod)
    except ModuleNotFoundError:
        return default_mod_class(mod_name=mod)
    return module
def create_random_bipartite(v1, v2, size, dtype):
    """
    Create a complete bipartite graph with v1 vertices in the first set,
    v2 vertices in the second set, and random integer edge weights drawn
    from [1, size).

    Returns a (first-set vertices, graph, weight matrix) tuple, where the
    weight matrix is the v1 x v2 numpy array used for the edge weights.
    """
    # Creates a full bipartite graph
    import numpy as np
    from cugraph.structure import Graph
    # First vertex set: ids [0, v1); the constant "key" column enables the
    # cross join below.
    df1 = cudf.DataFrame()
    df1["src"] = cudf.Series(range(0, v1, 1))
    df1["key"] = 1
    # Second vertex set: ids [v1, v1 + v2).
    df2 = cudf.DataFrame()
    df2["dst"] = cudf.Series(range(v1, v1 + v2, 1))
    df2["key"] = 1
    # Merging on the constant key yields every (src, dst) combination.
    edges = df1.merge(df2, on="key")[["src", "dst"]]
    edges = edges.sort_values(["src", "dst"]).reset_index()
    # Generate edge weights
    a = np.random.randint(1, high=size, size=(v1, v2)).astype(dtype)
    # Row-major flatten matches the sorted (src, dst) ordering above.
    edges["weight"] = a.flatten()
    g = Graph()
    g.from_cudf_edgelist(
        edges, source="src", destination="dst", edge_attr="weight", renumber=False
    )
    return df1["src"], g, a
def sample_groups(df, by, n_samples):
    """
    Return up to n_samples randomly-sampled rows per group, grouping df by
    the `by` column. n_samples == -1 keeps every row (still shuffled).
    """
    # Sample n_samples in the df using the by column
    # Step 1
    # first, shuffle the dataframe and reset its index,
    # so that the ordering of values within each group
    # is made random:
    df = df.sample(frac=1).reset_index(drop=True)
    # If we want to keep all samples we return
    if n_samples == -1:
        return df
    # Step 2
    # add an integer-encoded version of the "by" column,
    # since the rank aggregation seems not to work for
    # non-numeric data
    df["_"] = df[by].astype("category").cat.codes
    # Step 3
    # now do a "rank" aggregation and filter out only
    # the first N_SAMPLES ranks.
    # NOTE: "first" is the rank *method* (cudf GroupBy.rank signature) —
    # ties are ranked in order of appearance.
    result = df.loc[df.groupby(by)["_"].rank("first") <= n_samples, :]
    # Drop the helper column before handing the result back.
    del result["_"]
    return result
def create_list_series_from_2d_ar(ar, index):
    """
    Create a cudf list series from 2d arrays.

    Each row of `ar` becomes one list element of the returned series.
    """
    n_rows, n_cols = ar.shape
    # Flatten row-major so offsets at multiples of n_cols delimit the rows.
    data = as_column(ar.flatten())
    # Offsets column: 0, n_cols, 2*n_cols, ..., n_rows*n_cols.
    offset_col = as_column(
        cp.arange(start=0, stop=len(data) + 1, step=n_cols), dtype="int32"
    )
    # Every row is valid — build an all-True null mask.
    mask_col = cp.full(shape=n_rows, fill_value=True)
    mask = cudf._lib.transform.bools_to_mask(as_column(mask_col))
    # NOTE(review): this constructs a ListColumn through cudf internals
    # (cudf.core.column, cudf._lib) and may break across cudf versions.
    lc = cudf.core.column.ListColumn(
        size=n_rows,
        dtype=cudf.ListDtype(data.dtype),
        mask=mask,
        offset=0,
        null_count=0,
        children=(offset_col, data),
    )
    return cudf.Series(lc, index=index)
def create_directory_with_overwrite(directory):
    """
    Create the given directory, replacing any existing one.

    If a directory already exists at that path it is recursively deleted
    before the new, empty directory is created.
    """
    # EAFP: attempt the removal and ignore only the "nothing there" case.
    try:
        shutil.rmtree(directory)
    except FileNotFoundError:
        pass
    os.makedirs(directory)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_classes.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .graph_implementation import (
simpleGraphImpl,
simpleDistributedGraphImpl,
npartiteGraphImpl,
)
import cudf
import dask_cudf
from cugraph.utilities.utils import import_optional
pd = import_optional("pandas")
# TODO: Move to utilities
def null_check(input_data):
    """
    Raise ValueError if input_data contains any NULL values.

    input_data can be a cudf.Series, cudf.DataFrame, dask_cudf.Series or
    dask_cudf.DataFrame.
    """
    contains_null = input_data.isna().values.any()
    # dask collections are lazy — force evaluation of the reduction.
    if isinstance(input_data, (dask_cudf.Series, dask_cudf.DataFrame)):
        contains_null = contains_null.compute()
    if contains_null:
        raise ValueError("Series/DataFrame contains NULL values")
class Graph:
"""
A GPU Graph Object (Base class of other graph types)
Parameters
----------
m_graph : cuGraph.MultiGraph object or None (default=None)
Initialize the graph from a cugraph.MultiGraph object
directed : boolean, optional (default=False)
Indicated is the graph is directed.
Examples
--------
>>> # undirected graph
>>> G = cugraph.Graph()
>>> # directed graph
>>> G = cugraph.Graph(directed=True)
"""
    class Properties:
        # Lightweight container for graph-level properties shared with the
        # implementation objects.
        def __init__(self, directed):
            # True when edges are directed.
            self.directed = directed
            # Becomes truthy once edge weights are attached.
            self.weights = False
def __init__(self, m_graph=None, directed=False):
self._Impl = None
self.graph_properties = Graph.Properties(directed)
if m_graph is not None:
if isinstance(m_graph, MultiGraph):
elist = m_graph.view_edge_list()
if m_graph.is_weighted():
weights = m_graph.weight_column
else:
weights = None
self.from_cudf_edgelist(
elist,
source=m_graph.source_columns,
destination=m_graph.destination_columns,
edge_attr=weights,
)
else:
raise TypeError(
"m_graph can only be an instance of a "
f"cugraph.MultiGraph, got {type(m_graph)}"
)
    def __getattr__(self, name):
        """
        __getattr__() is called automatically by python when an attribute does not
        exist. Since this class is attempting to hide the internal `_Impl` object,
        which is intended to contain many of the attributes needed by this class,
        __getattr__ is used to "pass through" attribute access to _Impl and make it
        appear as if the _Impl attributes are contained in this class.
        """
        # Guard against infinite recursion: if "_Impl" itself is missing
        # (e.g. during unpickling before __init__ runs), the self._Impl
        # lookups below would re-enter __getattr__.
        if name == "_Impl":
            raise AttributeError(name)
        if hasattr(self._Impl, name):
            return getattr(self._Impl, name)
        # FIXME: Remove access to Impl properties
        elif hasattr(self._Impl.properties, name):
            return getattr(self._Impl.properties, name)
        else:
            # NOTE(review): when _Impl is None, the elif above raises
            # AttributeError("properties") rather than AttributeError(name)
            # — confirm that message is intended.
            raise AttributeError(name)
    def __dir__(self):
        # Expose the wrapped implementation's attributes for introspection /
        # tab-completion, consistent with the __getattr__ pass-through.
        return dir(self._Impl)
    def from_cudf_edgelist(
        self,
        input_df,
        source="source",
        destination="destination",
        edge_attr=None,
        weight=None,
        edge_id=None,
        edge_type=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initialize a graph from the edge list. It is an error to call this
        method on an initialized Graph object. The passed input_df argument
        wraps gdf_column objects that represent a graph using the edge list
        format. source argument is source column name and destination argument
        is destination column name.

        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.

        If weights are present, edge_attr argument is the weights column name.

        Parameters
        ----------
        input_df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame that contains edge information If a dask_cudf.DataFrame
            is passed it will be reinterpreted as a cudf.DataFrame. For the
            distributed path please use from_dask_cudf_edgelist.

        source : str or array-like, optional (default='source')
            source column name or array of column names

        destination : str or array-like, optional (default='destination')
            destination column name or array of column names

        edge_attr : str or List[str], optional (default=None)
            Names of the edge attributes. Can either be a single string
            representing the weight column name, or a list of length 3
            holding [weight, edge_id, edge_type]. If this argument is
            provided, then the weight/edge_id/edge_type arguments must
            be left empty.

        weight : str, optional (default=None)
            Name of the weight column in the input dataframe.

        edge_id : str, optional (default=None)
            Name of the edge id column in the input dataframe.

        edge_type : str, optional (default=None)
            Name of the edge type column in the input dataframe.

        renumber : bool, optional (default=True)
            Indicate whether or not to renumber the source and destination
            vertex IDs.

        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.

        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.

        Examples
        --------
        >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                    dtype=['int32', 'int32', 'float32'],
        ...                    header=None)
        >>> G = cugraph.Graph()
        >>> G.from_cudf_edgelist(df, source='0', destination='1',
        ...                      edge_attr='2', renumber=False)
        """
        # Lazily create the single-GPU implementation; reject re-use of a
        # graph that was built with a different implementation or that
        # already holds data.
        if self._Impl is None:
            self._Impl = simpleGraphImpl(self.graph_properties)
        elif type(self._Impl) is not simpleGraphImpl:
            raise RuntimeError("Graph is already initialized")
        elif self._Impl.edgelist is not None or self._Impl.adjlist is not None:
            raise RuntimeError("Graph already has values")
        # Delegate to the impl's private __from_edgelist (name-mangled).
        self._Impl._simpleGraphImpl__from_edgelist(
            input_df,
            source=source,
            destination=destination,
            edge_attr=edge_attr,
            weight=weight,
            edge_id=edge_id,
            edge_type=edge_type,
            renumber=renumber,
            store_transposed=store_transposed,
            legacy_renum_only=legacy_renum_only,
        )
    def from_cudf_adjlist(
        self,
        offset_col,
        index_col,
        value_col=None,
        renumber=True,
        store_transposed=False,
    ):
        """
        Initialize a graph from the adjacency list. It is an error to call this
        method on an initialized Graph object. The passed offset_col and
        index_col arguments wrap gdf_column objects that represent a graph
        using the adjacency list format.

        If value_col is None, an unweighted graph is created. If value_col is
        not None, a weighted graph is created.

        Undirected edges must be stored as directed edges in both directions.

        Parameters
        ----------
        offset_col : cudf.Series
            This cudf.Series wraps a gdf_column of size V + 1 (V: number of
            vertices). The gdf column contains the offsets for the vertices in
            this graph.
            Offsets must be in the range [0, E] (E: number of edges)

        index_col : cudf.Series
            This cudf.Series wraps a gdf_column of size E (E: number of edges).
            The gdf column contains the destination index for each edge.
            Destination indices must be in the range [0, V)
            (V: number of vertices).

        value_col : cudf.Series, optional (default=None)
            This pointer can be ``None``. If not, this cudf.Series wraps a
            gdf_column of size E (E: number of edges). The gdf column contains
            the weight value for each edge. The expected type of
            the gdf_column element is floating point number.

        renumber : bool, optional (default=True)
            Indicate whether or not to renumber the source and destination
            vertex IDs.

        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.

        Examples
        --------
        >>> gdf = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                     dtype=['int32', 'int32', 'float32'],
        ...                     header=None)
        >>> M = gdf.to_pandas()
        >>> M = scipy.sparse.coo_matrix((M['2'],(M['0'],M['1'])))
        >>> M = M.tocsr()
        >>> offsets = cudf.Series(M.indptr)
        >>> indices = cudf.Series(M.indices)
        >>> G = cugraph.Graph()
        >>> G.from_cudf_adjlist(offsets, indices, None)
        """
        # Lazily create the single-GPU implementation; reject re-use of a
        # graph that already holds data or uses another implementation.
        if self._Impl is None:
            self._Impl = simpleGraphImpl(self.graph_properties)
        elif type(self._Impl) is not simpleGraphImpl:
            raise RuntimeError("Graph is already initialized")
        elif self._Impl.edgelist is not None or self._Impl.adjlist is not None:
            raise RuntimeError("Graph already has values")
        # NOTE(review): the renumber and store_transposed arguments are
        # accepted but not forwarded to the impl call below — confirm
        # whether they are intentionally unused here.
        self._Impl._simpleGraphImpl__from_adjlist(offset_col, index_col, value_col)
    def from_dask_cudf_edgelist(
        self,
        input_ddf,
        source="source",
        destination="destination",
        edge_attr=None,
        weight=None,
        edge_id=None,
        edge_type=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initializes the distributed graph from the dask_cudf.DataFrame
        edgelist. Undirected Graphs are not currently supported.

        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.

        Note that the graph object will store a reference to the
        dask_cudf.DataFrame provided.

        Parameters
        ----------
        input_ddf : dask_cudf.DataFrame
            The edgelist as a dask_cudf.DataFrame

        source : str or array-like, optional (default='source')
            Source column name or array of column names

        destination : str, optional (default='destination')
            Destination column name or array of column names

        edge_attr : str or List[str], optional (default=None)
            Names of the edge attributes. Can either be a single string
            representing the weight column name, or a list of length 3
            holding [weight, edge_id, edge_type]. If this argument is
            provided, then the weight/edge_id/edge_type arguments must
            be left empty.

        weight : str, optional (default=None)
            Name of the weight column in the input dataframe.

        edge_id : str, optional (default=None)
            Name of the edge id column in the input dataframe.

        edge_type : str, optional (default=None)
            Name of the edge type column in the input dataframe.

        renumber : bool, optional (default=True)
            If source and destination indices are not in range 0 to V where V
            is number of vertices, renumber argument should be True.

        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.

        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.
        """
        # Lazily create the multi-GPU (distributed) implementation; reject
        # re-use of a graph built with another impl or already populated.
        if self._Impl is None:
            self._Impl = simpleDistributedGraphImpl(self.graph_properties)
        elif type(self._Impl) is not simpleDistributedGraphImpl:
            raise RuntimeError("Graph is already initialized")
        elif self._Impl.edgelist is not None:
            raise RuntimeError("Graph already has values")
        # Delegate to the impl's private __from_edgelist (name-mangled).
        self._Impl._simpleDistributedGraphImpl__from_edgelist(
            input_ddf,
            source=source,
            destination=destination,
            edge_attr=edge_attr,
            weight=weight,
            edge_id=edge_id,
            edge_type=edge_type,
            renumber=renumber,
            store_transposed=store_transposed,
            legacy_renum_only=legacy_renum_only,
        )
# Move to Compat Module
def from_pandas_edgelist(
self,
pdf,
source="source",
destination="destination",
edge_attr=None,
weight=None,
edge_id=None,
edge_type=None,
renumber=True,
):
"""
Initialize a graph from the edge list. It is an error to call this
method on an initialized Graph object. Source argument is source
column name and destination argument is destination column name.
By default, renumbering is enabled to map the source and destination
vertices into an index in the range [0, V) where V is the number
of vertices. If the input vertices are a single column of integers
in the range [0, V), renumbering can be disabled and the original
external vertex ids will be used.
Weights, edge ids, and edge types can be passed through either the
edge_attr argument or individually as separate keyword arguments.
All three are optional.
Parameters
----------
pdf : pandas.DataFrame
A DataFrame that contains edge information
source : str or array-like, optional (default='source')
Source column name or array of column names
destination : str or array-like, optional (default='destination')
Destination column name or array of column names
edge_attr : str or List[str], optional (default=None)
Names of the edge attributes. Can either be a single string
representing the weight column name, or a list of length 3
holding [weight, edge_id, edge_type]. If this argument is
provided, then the weight/edge_id/edge_type arguments must
be left empty.
weight : str, optional (default=None)
Name of the weight column in the input dataframe.
edge_id : str, optional (default=None)
Name of the edge id column in the input dataframe.
edge_type : str, optional (default=None)
Name of the edge type column in the input dataframe.
renumber : bool, optional (default=True)
Indicate whether or not to renumber the source and destination
vertex IDs.
Examples
--------
>>> # Download dataset from
>>> # https://github.com/rapidsai/cugraph/datasets/...
>>> df = pd.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... header=None, names=["0", "1", "2"],
... dtype={"0": "int32", "1": "int32",
... "2": "float32"})
>>> G = cugraph.Graph()
>>> G.from_pandas_edgelist(df, source='0', destination='1',
... edge_attr='2', renumber=False)
"""
if not isinstance(pdf, pd.core.frame.DataFrame):
raise TypeError("pdf input is not a Pandas DataFrame")
gdf = cudf.DataFrame.from_pandas(pdf)
self.from_cudf_edgelist(
gdf,
source=source,
destination=destination,
edge_attr=edge_attr,
weight=weight,
edge_id=edge_id,
edge_type=edge_type,
renumber=renumber,
)
def from_pandas_adjacency(self, pdf):
"""
Initializes the graph from pandas adjacency matrix.
Parameters
----------
pdf : pandas.DataFrame
A DataFrame that contains adjacency information
"""
if not isinstance(pdf, pd.core.frame.DataFrame):
raise TypeError("pdf input is not a Pandas DataFrame")
np_array = pdf.to_numpy()
columns = pdf.columns
self.from_numpy_array(np_array, columns)
def from_numpy_array(self, np_array, nodes=None):
"""
Initializes the graph from numpy array containing adjacency matrix.
Parameters
----------
np_array : numpy.array
A Numpy array that contains adjacency information
nodes: array-like or None, optional (default=None)
A list of column names, acting as labels for nodes
"""
np_array = np.asarray(np_array)
if len(np_array.shape) != 2:
raise ValueError("np_array is not a 2D matrix")
src, dst = np_array.nonzero()
weight = np_array[src, dst]
df = cudf.DataFrame()
if nodes is not None:
df["src"] = nodes[src]
df["dst"] = nodes[dst]
else:
df["src"] = src
df["dst"] = dst
df["weight"] = weight
self.from_cudf_edgelist(df, "src", "dst", edge_attr="weight")
def from_numpy_matrix(self, np_matrix):
"""
Initializes the graph from numpy matrix containing adjacency matrix.
Parameters
----------
np_matrix : numpy.matrix
A Numpy matrix that contains adjacency information
"""
if not isinstance(np_matrix, np.matrix):
raise TypeError("np_matrix input is not a Numpy matrix")
np_array = np.asarray(np_matrix)
self.from_numpy_array(np_array)
    def unrenumber(self, df, column_name, preserve_order=False, get_column_names=False):
        """
        Given a DataFrame containing internal vertex ids in the identified
        column, replace this with external vertex ids. If the renumbering
        is from a single column, the output dataframe will use the same
        name for the external vertex identifiers. If the renumbering is from
        a multi-column input, the output columns will be labeled 0 through
        n-1 with a suffix of _column_name.

        Note that this function does not guarantee order in single GPU mode,
        and does not guarantee order or partitioning in multi-GPU mode. If you
        wish to preserve ordering, add an index column to df and sort the
        return by that index column.

        Parameters
        ----------
        df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame containing internal vertex identifiers that will be
            converted into external vertex identifiers.

        column_name : string
            Name of the column containing the internal vertex id.

        preserve_order : bool, optional (default=False)
            If True, preserve the order of the rows in the output DataFrame to
            match the input DataFrame

        get_column_names : bool, optional (default=False)
            If True, the unrenumbered column names are returned.

        Returns
        -------
        df : cudf.DataFrame or dask_cudf.DataFrame
            The original DataFrame columns exist unmodified. The external
            vertex identifiers are added to the DataFrame, the internal
            vertex identifier column is removed from the dataframe.
        """
        # `renumber_map` is resolved through the __getattr__ pass-through —
        # presumably provided by the _Impl object after graph construction.
        return self.renumber_map.unrenumber(
            df, column_name, preserve_order, get_column_names
        )
    def lookup_internal_vertex_id(self, df, column_name=None):
        """
        Given a DataFrame containing external vertex ids in the identified
        columns, or a Series containing external vertex ids, return a
        Series with the internal vertex ids.

        Note that this function does not guarantee order in single GPU mode,
        and does not guarantee order or partitioning in multi-GPU mode.

        Parameters
        ----------
        df : cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
            A DataFrame containing external vertex identifiers that will be
            converted into internal vertex identifiers.

        column_name : string, optional (default=None)
            Name of the column containing the external vertex ids

        Returns
        -------
        series : cudf.Series or dask_cudf.Series
            The internal vertex identifiers
        """
        # `renumber_map` is resolved through the __getattr__ pass-through —
        # presumably provided by the _Impl object after graph construction.
        return self.renumber_map.to_internal_vertex_id(df, column_name)
    def add_internal_vertex_id(
        self,
        df,
        internal_column_name,
        external_column_name,
        drop=True,
        preserve_order=False,
    ):
        """
        Given a DataFrame containing external vertex ids in the identified
        columns, return a DataFrame containing the internal vertex ids as the
        specified column name. Optionally drop the external vertex id columns.
        Optionally preserve the order of the original DataFrame.

        Parameters
        ----------
        df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame containing external vertex identifiers that will be
            converted into internal vertex identifiers.

        internal_column_name : string
            Name of column to contain the internal vertex id

        external_column_name : string or list of strings
            Name of the column(s) containing the external vertex ids

        drop : bool, optional (default=True)
            Drop the external columns from the returned DataFrame

        preserve_order : bool, optional (default=False)
            Preserve the order of the data frame (requires an extra sort)

        Returns
        -------
        df : cudf.DataFrame or dask_cudf.DataFrame
            Original DataFrame with new column containing internal vertex
            id
        """
        # `renumber_map` is resolved through the __getattr__ pass-through —
        # presumably provided by the _Impl object after graph construction.
        return self.renumber_map.add_internal_vertex_id(
            df,
            internal_column_name,
            external_column_name,
            drop,
            preserve_order,
        )
    def clear(self):
        """
        Empty the graph.
        """
        # Dropping the implementation object discards all graph data; a
        # subsequent from_* call re-creates it.
        self._Impl = None
    def is_bipartite(self):
        """
        Checks if Graph is bipartite. This solely relies on the user call of
        add_nodes_from with the bipartite parameter. This does not parse the
        graph to check if it is bipartite.

        NOTE: Currently not implemented and always returns False
        """
        # TO DO: Call coloring algorithm
        return False
    def is_multipartite(self):
        """
        Checks if Graph is multipartite. This solely relies on the user call
        of add_nodes_from with the partition parameter. This does not parse
        the graph to check if it is multipartite.
        NOTE: Currently not implemented and always returns False
        """
        # TODO: inspect the stored partitions instead of hard-coding False;
        # NPartiteGraph overrides this by type.
        return False
    def is_multigraph(self):
        """
        Returns True if the graph is a multigraph. Else returns False.
        NOTE: Currently not implemented and always returns False
        """
        # TODO: detect parallel edges instead of hard-coding False;
        # MultiGraph overrides this by type.
        return False
    def is_directed(self):
        """
        Returns True if the graph is a directed graph.
        Returns False if the graph is an undirected graph.
        """
        # Directedness is fixed at construction time via graph_properties.
        return self.graph_properties.directed
    def is_renumbered(self):
        """
        Returns True if the graph is renumbered.
        """
        # NOTE(review): reads self.properties while is_directed() reads
        # self.graph_properties -- presumably the former reflects state set
        # when the edgelist was loaded; confirm the distinction is intended.
        return self.properties.renumbered
    def is_weighted(self):
        """
        Returns True if the graph has edge weights.
        """
        # Set when an edge_attr column is supplied at load time.
        return self.properties.weighted
    def has_isolated_vertices(self):
        """
        Returns True if the graph has isolated vertices.
        """
        # Cached flag on the properties object; this method does not scan
        # the graph itself.
        return self.properties.isolated_vertices
    def is_remote(self):
        """
        Returns True if the graph is remote; otherwise returns False.
        """
        # Local (in-process) graphs are never remote.
        return False
    def is_multi_gpu(self):
        """
        Returns True if the graph is a multi-gpu graph; otherwise
        returns False.
        """
        # A graph is multi-GPU iff it is backed by the distributed
        # (dask-based) implementation object.
        return isinstance(self._Impl, simpleDistributedGraphImpl)
    def to_directed(self):
        """
        Return a directed representation of the graph.
        This function sets the directed attribute as True and returns the
        directed view.
        Returns
        -------
        G : Graph
            A directed graph with the same nodes, and each edge (u,v,weights)
            replaced by two directed edges (u,v,weights) and (v,u,weights).
        Examples
        --------
        >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                   dtype=['int32', 'int32', 'float32'], header=None)
        >>> G = cugraph.Graph()
        >>> G.from_cudf_edgelist(M, '0', '1')
        >>> DiG = G.to_directed()
        """
        # Build an empty graph of the same (sub)class, flip its directed
        # flag, then let the implementation copy/convert the edge data.
        directed_graph = type(self)()
        directed_graph.graph_properties.directed = True
        directed_graph._Impl = type(self._Impl)(directed_graph.graph_properties)
        self._Impl.to_directed(directed_graph._Impl)
        return directed_graph
    def to_undirected(self):
        """
        Return an undirected copy of the graph.
        Returns
        -------
        G : Graph
            A undirected graph with the same nodes, and each directed edge
            (u,v,weights) replaced by an undirected edge (u,v,weights).
        Examples
        --------
        >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                   dtype=['int32', 'int32', 'float32'], header=None)
        >>> DiG = cugraph.Graph(directed=True)
        >>> DiG.from_cudf_edgelist(M, '0', '1')
        >>> G = DiG.to_undirected()
        """
        # Already-undirected graphs (and direct subclasses of object) copy
        # into the same class; otherwise fall back to the parent class,
        # which is assumed to be the undirected variant of this type.
        if self.graph_properties.directed is False:
            undirected_graph = type(self)()
        elif self.__class__.__bases__[0] == object:
            undirected_graph = type(self)()
        else:
            undirected_graph = self.__class__.__bases__[0]()
        undirected_graph._Impl = type(self._Impl)(undirected_graph.graph_properties)
        self._Impl.to_undirected(undirected_graph._Impl)
        return undirected_graph
    def add_nodes_from(self, nodes):
        """
        Add nodes information to the Graph.
        Parameters
        ----------
        nodes : list or cudf.Series
            The nodes of the graph to be stored.
        """
        # NOTE(review): assumes self._Impl has already been initialized
        # (e.g. by a from_*_edgelist call); on a freshly-constructed Graph
        # this raises AttributeError -- confirm intended.
        self._Impl._nodes["all_nodes"] = cudf.Series(nodes)
    def density(self) -> float:
        """
        Compute the density of the graph.
        Density is the measure of how many edges are in the graph versus
        the max number of edges that could be present.
        Returns
        -------
        density : float
            Density is the measure of how many edges are in the graph versus
            the max number of edges that could be present.
        Examples
        --------
        >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                   dtype=['int32', 'int32', 'float32'], header=None)
        >>> DiG = cugraph.Graph(directed=True)
        >>> DiG.from_cudf_edgelist(M, '0', '1')
        >>> density = DiG.density()
        """
        # factor compensates for the relationship between the directed edge
        # count returned below and the maximum possible edge count of the
        # graph's mode (directed vs undirected).
        if self.is_directed():
            factor = 1
        else:
            factor = 2
        num_e = self._Impl.number_of_edges(directed_edges=True)
        num_v = self._Impl.number_of_vertices()
        # NOTE(review): raises ZeroDivisionError for graphs with fewer than
        # two vertices -- confirm whether that is acceptable.
        density = (factor * num_e) / (num_v * (num_v - 1))
        return density
# TODO: Add function
# def properties():
class MultiGraph(Graph):
    """
    A Multigraph; a Graph containing more than one edge between vertex pairs.
    """
    def __init__(self, directed=False):
        """
        Parameters
        ----------
        directed : bool, optional (default=False)
            When True the multigraph is directed.
        """
        super().__init__(directed=directed)
        # Mark the graph as allowing parallel (duplicate) edges.
        self.graph_properties.multi_edge = True
    def is_multigraph(self):
        """
        Returns True if the graph is a multigraph. Else returns False.
        """
        # Always True by construction for this type.
        return True
    def density(self):
        """
        Density is the measure of how many edges are in the graph versus
        the max number of edges that could be present.
        This function is not supported on a Multigraph.
        Since the maximal number of possible edges between any vertex pairs
        can be greater than 1 (undirected) a realistic max number of possible
        edges cannot be determined. Running density on a MultiGraph
        could produce a density score greater than 1 - meaning more than
        100% of possible edges are present in the Graph
        Raises
        ------
        TypeError
            Always raised; density is undefined for multigraphs.
        """
        # Fix: the message previously read "is not support on".
        raise TypeError("The density function is not supported on a Multigraph.")
class Tree(Graph):
    """
    A Tree
    """
    def __init__(self, directed=False):
        """Create a graph flagged as a tree, optionally directed."""
        super().__init__(directed=directed)
        self.graph_properties.tree = True
class NPartiteGraph(Graph):
    """
    A graph whose vertices are divided into N partitions (multipartite).
    """
    def __init__(self, bipartite=False, directed=False):
        # bipartite=True restricts the partition count to two; the graph is
        # always flagged as multipartite.
        super(NPartiteGraph, self).__init__(directed=directed)
        self.graph_properties.bipartite = bipartite
        self.graph_properties.multipartite = True
    def from_cudf_edgelist(
        self,
        input_df,
        source="source",
        destination="destination",
        edge_attr=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initialize a graph from the edge list. It is an error to call this
        method on an initialized Graph object. The passed input_df argument
        wraps gdf_column objects that represent a graph using the edge list
        format. source argument is source column name and destination argument
        is destination column name.
        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.
        If weights are present, edge_attr argument is the weights column name.
        Parameters
        ----------
        input_df : cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame that contains edge information. If a
            dask_cudf.DataFrame is passed it will be reinterpreted as a
            cudf.DataFrame. For the distributed path please use
            from_dask_cudf_edgelist.
        source : str or array-like, optional (default='source')
            Source column name or array of column names
        destination : str or array-like, optional (default='destination')
            Destination column name or array of column names
        edge_attr : str or None, optional (default=None)
            The weights column name
        renumber : bool, optional (default=True)
            Indicate whether or not to renumber the source and destination
            vertex IDs
        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.
        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.
        Examples
        --------
        >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                    dtype=['int32', 'int32', 'float32'],
        ...                    header=None)
        >>> G = cugraph.BiPartiteGraph()
        >>> G.from_cudf_edgelist(df, source='0', destination='1',
        ...                      edge_attr='2', renumber=False)
        """
        # Lazily create the n-partite implementation on first use.
        if self._Impl is None:
            self._Impl = npartiteGraphImpl(self.graph_properties)
        # API may change in future
        # NOTE(review): store_transposed and legacy_renum_only are accepted
        # but not forwarded to the implementation -- confirm intended.
        self._Impl._npartiteGraphImpl__from_edgelist(
            input_df,
            source=source,
            destination=destination,
            edge_attr=edge_attr,
            renumber=renumber,
        )
    def from_dask_cudf_edgelist(
        self,
        input_ddf,
        source="source",
        destination="destination",
        edge_attr=None,
        renumber=True,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Initializes the distributed graph from the dask_cudf.DataFrame
        edgelist. Undirected Graphs are not currently supported.
        By default, renumbering is enabled to map the source and destination
        vertices into an index in the range [0, V) where V is the number
        of vertices. If the input vertices are a single column of integers
        in the range [0, V), renumbering can be disabled and the original
        external vertex ids will be used.
        Note that the graph object will store a reference to the
        dask_cudf.DataFrame provided.
        Parameters
        ----------
        input_ddf : dask_cudf.DataFrame
            The edgelist as a dask_cudf.DataFrame
        source : str or array-like, optional (default='source')
            Source column name or array of column names
        destination : str, optional (default='destination')
            Destination column name or array of column names
        edge_attr : str, optional (default=None)
            Weights column name.
        renumber : bool, optional (default=True)
            If source and destination indices are not in range 0 to V where V
            is number of vertices, renumber argument should be True.
        store_transposed : bool, optional (default=False)
            If True, stores the transpose of the adjacency matrix. Required
            for certain algorithms.
        legacy_renum_only : bool, optional (default=False)
            If True, skips the C++ renumbering step. Must be true for
            pylibcugraph algorithms. Must be false for algorithms
            not yet converted to the pylibcugraph C API.
            This parameter is deprecated and will be removed.
        Raises
        ------
        TypeError
            Always raised; the distributed path is not implemented for
            n-partite graphs.
        """
        raise TypeError("Distributed N-partite graph not supported")
    def add_nodes_from(self, nodes, bipartite=None, multipartite=None):
        """
        Add nodes information to the Graph.
        Parameters
        ----------
        nodes : list or cudf.Series
            The nodes of the graph to be stored. If bipartite and multipartite
            arguments are not passed, the nodes are considered to be a list of
            all the nodes present in the Graph.
        bipartite : str, optional (default=None)
            Sets the Graph as bipartite. The nodes are stored as a set of nodes
            of the partition named as bipartite argument.
        multipartite : str, optional (default=None)
            Sets the Graph as multipartite. The nodes are stored as a set of
            nodes of the partition named as multipartite argument.
        """
        # Lazily create the n-partite implementation on first use.
        if self._Impl is None:
            self._Impl = npartiteGraphImpl(self.graph_properties)
        # No partition name given: treat the input as the full node set.
        if bipartite is None and multipartite is None:
            self._Impl._nodes["all_nodes"] = cudf.Series(nodes)
        else:
            self._Impl.add_nodes_from(
                nodes, bipartite=bipartite, multipartite=multipartite
            )
    def is_multipartite(self):
        """
        Checks if Graph is multipartite. This solely relies on the user call
        of add_nodes_from with the partition parameter and the Graph created.
        This does not parse the graph to check if it is multipartite.
        """
        # Always True by construction for this type.
        return True
class BiPartiteGraph(NPartiteGraph):
    """
    A Bipartite Graph
    """
    def __init__(self, directed=False):
        """Create a bipartite graph, optionally directed."""
        super().__init__(bipartite=True, directed=directed)
    def is_bipartite(self):
        """
        Checks if Graph is bipartite. This solely relies on the Graph type
        and the user's add_nodes_from calls; the edge structure itself is
        not validated.
        """
        # Always True by construction for this type.
        return True
def is_directed(G):
    """
    Returns True if the graph is a directed graph.
    Returns False if the graph is an undirected graph.
    Parameters
    ----------
    G : cugraph.Graph
        The graph to query.
    """
    # Thin module-level convenience wrapper around the Graph method.
    return G.is_directed()
def is_multigraph(G):
    """
    Returns True if the graph is a multigraph. Else returns False.
    Parameters
    ----------
    G : cugraph.Graph
        The graph to query.
    """
    # Thin module-level convenience wrapper around the Graph method.
    return G.is_multigraph()
def is_multipartite(G):
    """
    Checks if Graph is multipartite. This solely relies on the Graph
    type. This does not parse the graph to check if it is multipartite.
    Parameters
    ----------
    G : cugraph.Graph
        The graph to query.
    Returns
    -------
    bool
        True if G is a multipartite graph type.
    """
    # Fix: the method name was previously misspelled ("is_multipatite"),
    # which raised AttributeError on every call.
    return G.is_multipartite()
def is_bipartite(G):
    """
    Checks if Graph is bipartite. This solely relies on the Graph type.
    This does not parse the graph to check if it is bipartite.
    Parameters
    ----------
    G : cugraph.Graph
        The graph to query.
    """
    # Thin module-level convenience wrapper around the Graph method.
    return G.is_bipartite()
def is_weighted(G):
    """
    Returns True if the graph has edge weights.
    Parameters
    ----------
    G : cugraph.Graph
        The graph to query.
    """
    # Thin module-level convenience wrapper around the Graph method.
    return G.is_weighted()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/utils_wrapper.pyx | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libc.stdint cimport uintptr_t
from cugraph.structure cimport utils as c_utils
from cugraph.structure.graph_primtypes cimport *
from libc.stdint cimport uintptr_t
import cudf
import numpy as np
def weight_type(weights):
    """Return the dtype of ``weights``, or None when no weights are given."""
    if weights is None:
        return None
    return weights.dtype
def create_csr_float(source_col, dest_col, weights):
    """
    Convert a COO edge list (int32 ids, optional float32 weights) to CSR,
    returning (offsets, indices, weights) as cudf.Series.
    """
    # NOTE(review): num_verts is passed as 0; presumably the C++
    # coo_to_csr infers the vertex count from the input -- confirm.
    num_verts = 0
    num_edges = len(source_col)
    # Raw device pointers extracted via the CUDA array interface.
    cdef uintptr_t c_src = source_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = dest_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = <uintptr_t> NULL
    if weights is not None:
        c_weights = weights.__cuda_array_interface__['data'][0]
    cdef GraphCOOView[int,int,float] in_graph
    in_graph = GraphCOOView[int,int,float](<int*>c_src, <int*>c_dst, <float*>c_weights, num_verts, num_edges)
    return csr_to_series(move(c_utils.coo_to_csr[int,int,float](in_graph)))
def create_csr_double(source_col, dest_col, weights):
    """
    Convert a COO edge list (int32 ids, optional float64 weights) to CSR,
    returning (offsets, indices, weights) as cudf.Series.
    """
    # NOTE(review): num_verts is passed as 0; presumably the C++
    # coo_to_csr infers the vertex count from the input -- confirm.
    num_verts = 0
    num_edges = len(source_col)
    # Raw device pointers extracted via the CUDA array interface.
    cdef uintptr_t c_src = source_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = dest_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = <uintptr_t> NULL
    if weights is not None:
        c_weights = weights.__cuda_array_interface__['data'][0]
    cdef GraphCOOView[int,int,double] in_graph
    in_graph = GraphCOOView[int,int,double](<int*>c_src, <int*>c_dst, <double*>c_weights, num_verts, num_edges)
    return csr_to_series(move(c_utils.coo_to_csr[int,int,double](in_graph)))
def coo2csr(source_col, dest_col, weights=None):
    """
    Convert a COO edge list to CSR.
    Parameters
    ----------
    source_col, dest_col : cudf.Series (int32)
        Source and destination vertex ids; must have equal length and dtype.
    weights : cudf.Series, optional (default=None)
        Optional edge weights (float32 or float64).
    Returns
    -------
    tuple
        (offsets, indices, weights) describing the CSR adjacency.
    Raises
    ------
    ValueError
        If source_col and dest_col differ in length.
    TypeError
        If the columns are not both int32 or their dtypes differ.
    """
    # Use specific exception types (both subclass the bare Exception that
    # was raised before, so existing "except Exception" callers still work).
    if len(source_col) != len(dest_col):
        raise ValueError("source_col and dest_col should have the same number of elements")
    if source_col.dtype != dest_col.dtype:
        raise TypeError("source_col and dest_col should be the same type")
    if source_col.dtype != np.int32:
        raise TypeError("source_col and dest_col must be type np.int32")
    if len(source_col) == 0:
        # Empty graph: a CSR with a single zero offset and no indices.
        return cudf.Series(np.zeros(1, dtype=np.int32)), cudf.Series(np.zeros(1, dtype=np.int32)), weights
    # float64 weights take the double-precision path; float32 or
    # unweighted inputs take the single-precision path.
    if weight_type(weights) == np.float64:
        return create_csr_double(source_col, dest_col, weights)
    else:
        return create_csr_float(source_col, dest_col, weights)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/utils.pxd | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.structure.graph_primtypes cimport *
from libcpp.memory cimport unique_ptr
cdef extern from "cugraph/legacy/functions.hpp" namespace "cugraph":
    # Convert a COO graph view into a newly-allocated CSR graph.
    cdef unique_ptr[GraphCSR[VT,ET,WT]] coo_to_csr[VT,ET,WT](
        const GraphCOOView[VT,ET,WT] &graph) except +
    # Broadcast `size` values rooted at `dst`; presumably across the
    # communicator owned by `handle` -- confirm against the C++ header.
    cdef void comms_bcast[value_t](
        const handle_t &handle,
        value_t *dst,
        size_t size) except +
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_primtypes.pyx | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
import numpy as np
from libc.stdint cimport uintptr_t
from libcpp.utility cimport move
from rmm._lib.device_buffer cimport DeviceBuffer
from cudf.core.buffer import as_buffer
import cudf
cdef move_device_buffer_to_column(
        unique_ptr[device_buffer] device_buffer_unique_ptr, dtype):
    """
    Transfers ownership of device_buffer_unique_ptr to a cuDF buffer which is
    used to construct a cudf column object, which is then returned. If the
    intermediate buffer is empty, the device_buffer_unique_ptr is still
    transferred but None is returned.
    """
    buff = DeviceBuffer.c_from_unique_ptr(move(device_buffer_unique_ptr))
    buff = as_buffer(buff)
    # An empty buffer means "no data" (e.g. an unweighted graph): signal
    # that with None rather than a zero-length column.
    if buff.nbytes != 0:
        column = cudf.core.column.build_column(buff, dtype=dtype)
        return column
    return None
cdef move_device_buffer_to_series(
        unique_ptr[device_buffer] device_buffer_unique_ptr, dtype, series_name):
    """
    Transfers ownership of device_buffer_unique_ptr to a cuDF buffer which is
    used to construct a cudf.Series object with name series_name, which is then
    returned. If the intermediate buffer is empty, the device_buffer_unique_ptr
    is still transferred but None is returned.
    """
    # Reuse the column conversion; it returns None for empty buffers.
    column = move_device_buffer_to_column(move(device_buffer_unique_ptr), dtype)
    if column is not None:
        series = cudf.Series._from_data({series_name: column})
        return series
    return None
cdef coo_to_df(GraphCOOPtrType graph):
    """
    Release the contents of a GraphCOO unique_ptr into a cudf.DataFrame
    with 'src'/'dst' columns and an optional 'weight' column.
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    contents = move(graph.get()[0].release())
    src = move_device_buffer_to_column(move(contents.src_indices), "int32")
    dst = move_device_buffer_to_column(move(contents.dst_indices), "int32")
    # The fused pointer type fixes the weight precision at compile time.
    if GraphCOOPtrType is GraphCOOPtrFloat:
        weight_type = "float32"
    elif GraphCOOPtrType is GraphCOOPtrDouble:
        weight_type = "float64"
    else:
        raise TypeError("Invalid GraphCOOPtrType")
    wgt = move_device_buffer_to_column(move(contents.edge_data), weight_type)
    df = cudf.DataFrame()
    df['src'] = src
    df['dst'] = dst
    # Unweighted graphs have an empty edge_data buffer -> wgt is None.
    if wgt is not None:
        df['weight'] = wgt
    return df
cdef csr_to_series(GraphCSRPtrType graph):
    """
    Release the contents of a GraphCSR unique_ptr into a tuple of
    cudf.Series: (csr_offsets, csr_indices, csr_weights).
    """
    contents = move(graph.get()[0].release())
    csr_offsets = move_device_buffer_to_series(move(contents.offsets),
                                               "int32", "csr_offsets")
    csr_indices = move_device_buffer_to_series(move(contents.indices),
                                               "int32", "csr_indices")
    # The fused pointer type fixes the weight precision at compile time.
    if GraphCSRPtrType is GraphCSRPtrFloat:
        weight_type = "float32"
    elif GraphCSRPtrType is GraphCSRPtrDouble:
        weight_type = "float64"
    else:
        raise TypeError("Invalid GraphCSRPtrType")
    # csr_weights is None for unweighted graphs (empty edge_data buffer).
    csr_weights = move_device_buffer_to_series(move(contents.edge_data),
                                               weight_type, "csr_weights")
    return (csr_offsets, csr_indices, csr_weights)
cdef GraphCOOViewFloat get_coo_float_graph_view(input_graph, bool weighted=True):
    """
    Build a non-owning float32 COO view over input_graph's edge list
    (materializing the edge list first if needed).
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    if not input_graph.edgelist:
        input_graph.view_edge_list()
    num_edges = input_graph.number_of_edges(directed_edges=True)
    num_verts = input_graph.number_of_vertices()
    # Raw device pointers into the (still-owned-by-cuDF) edge list columns.
    cdef uintptr_t c_src = input_graph.edgelist.edgelist_df['src'].__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = input_graph.edgelist.edgelist_df['dst'].__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = <uintptr_t>NULL
    # FIXME explicit check for None fails, different behavior than get_csr_graph_view
    if input_graph.edgelist.weights and weighted:
        c_weights = input_graph.edgelist.edgelist_df['weights'].__cuda_array_interface__['data'][0]
    return GraphCOOViewFloat(<int*>c_src, <int*>c_dst, <float*>c_weights, num_verts, num_edges)
cdef GraphCOOViewDouble get_coo_double_graph_view(input_graph, bool weighted=True):
    """
    Build a non-owning float64 COO view over input_graph's edge list
    (materializing the edge list first if needed).
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    if not input_graph.edgelist:
        input_graph.view_edge_list()
    num_edges = input_graph.number_of_edges(directed_edges=True)
    num_verts = input_graph.number_of_vertices()
    # Raw device pointers into the (still-owned-by-cuDF) edge list columns.
    cdef uintptr_t c_src = input_graph.edgelist.edgelist_df['src'].__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = input_graph.edgelist.edgelist_df['dst'].__cuda_array_interface__['data'][0]
    cdef uintptr_t c_weights = <uintptr_t>NULL
    # FIXME explicit check for None fails, different behavior than get_csr_graph_view
    if input_graph.edgelist.weights and weighted:
        c_weights = input_graph.edgelist.edgelist_df['weights'].__cuda_array_interface__['data'][0]
    return GraphCOOViewDouble(<int*>c_src, <int*>c_dst, <double*>c_weights, num_verts, num_edges)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/convert_matrix.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this file is pure python and no need to be a cython file. Once cugraph's
# issue #146 is addressed, this file's extension should be changed from .pyx to
# .py and should be located outside the python/cugraph/bindings directory.
import cudf
import dask_cudf
from cugraph.structure.graph_classes import Graph
# optional dependencies used for handling different input types
try:
import pandas as pd
except ModuleNotFoundError:
pd = None
def from_edgelist(
    df,
    source="source",
    destination="destination",
    edge_attr=None,
    create_using=Graph,
    renumber=True,
):
    """
    Return a new graph created from the edge list representation.
    Parameters
    ----------
    df : cudf.DataFrame, pandas.DataFrame, dask_cudf.core.DataFrame
        This DataFrame contains columns storing edge source vertices,
        destination (or target following NetworkX's terminology) vertices, and
        (optional) weights.
    source : string or integer, optional (default='source')
        This is used to index the source column.
    destination : string or integer, optional (default='destination')
        This is used to index the destination (or target following NetworkX's
        terminology) column.
    edge_attr : string or integer, optional (default=None)
        This pointer can be ``None``. If not, this is used to index the weight
        column.
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    renumber : bool, optional (default=True)
        If source and destination indices are not in range 0 to V where V
        is number of vertices, renumber argument should be True.
    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G = cugraph.from_edgelist(M, source='0', destination='1',
    ...                           edge_attr='2')
    """
    # Dispatch on the exact DataFrame type; `is` comparisons deliberately
    # select one backend per concrete type.
    df_type = type(df)
    if df_type is cudf.DataFrame:
        return from_cudf_edgelist(
            df,
            source,
            destination,
            edge_attr=edge_attr,
            create_using=create_using,
            renumber=renumber,
        )
    elif (pd is not None) and (df_type is pd.DataFrame):
        return from_pandas_edgelist(
            df,
            source,
            destination,
            edge_attr=edge_attr,
            create_using=create_using,
            renumber=renumber,
        )
    elif df_type is dask_cudf.core.DataFrame:
        # Resolve create_using (None, instance, or class) to a Graph object.
        if create_using is None:
            G = Graph()
        elif isinstance(create_using, Graph):
            attrs = {"directed": create_using.is_directed()}
            G = type(create_using)(**attrs)
        elif type(create_using) is type(Graph):
            G = create_using()
        else:
            raise TypeError(
                "create_using must be a cugraph.Graph "
                "(or subclass) type or instance, got: "
                f"{type(create_using)}"
            )
        G.from_dask_cudf_edgelist(
            df, source, destination, edge_attr=edge_attr, renumber=renumber
        )
        return G
    else:
        raise TypeError(f"obj of type {df_type} is not supported.")
def from_adjlist(offsets, indices, values=None, create_using=Graph):
    """
    Initializes the graph from cuDF or Pandas Series representing adjacency
    matrix CSR data and returns a new cugraph.Graph object.
    Parameters
    ----------
    offsets : cudf.Series, pandas.Series
        The offsets of a CSR adjacency matrix.
    indices : cudf.Series, pandas.Series
        The indices of a CSR adjacency matrix.
    values : cudf.Series or pandas.Series, optional (default=None)
        The values in a CSR adjacency matrix, which represent edge weights in a
        graph. If not provided, the resulting graph is considered unweighted.
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    Examples
    --------
    >>> pdf = pd.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype={0:'int32', 1:'int32', 2:'float32'},
    ...                   header=None)
    >>> M = scipy.sparse.coo_matrix((pdf[2],(pdf[0],pdf[1])))
    >>> M = M.tocsr()
    >>> offsets = pd.Series(M.indptr)
    >>> indices = pd.Series(M.indices)
    >>> G = cugraph.from_adjlist(offsets, indices, None)
    """
    # All three inputs must come from the same library (all cuDF or all
    # pandas) so they can be handled by one conversion path.
    offsets_type = type(offsets)
    indices_type = type(indices)
    if offsets_type != indices_type:
        raise TypeError(
            f"'offsets' type {offsets_type} != 'indices' " f"type {indices_type}"
        )
    if values is not None:
        values_type = type(values)
        if values_type != offsets_type:
            raise TypeError(
                f"'values' type {values_type} != 'offsets' " f"type {offsets_type}"
            )
    # Resolve create_using (None, instance, or class) to a Graph object.
    if create_using is None:
        G = Graph()
    elif isinstance(create_using, Graph):
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif type(create_using) is type(Graph):
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    if offsets_type is cudf.Series:
        G.from_cudf_adjlist(offsets, indices, values)
    elif (pd is not None) and (offsets_type is pd.Series):
        # pandas input is converted to cuDF before loading.
        G.from_cudf_adjlist(
            cudf.Series(offsets),
            cudf.Series(indices),
            None if values is None else cudf.Series(values),
        )
    else:
        raise TypeError(f"obj of type {offsets_type} is not supported.")
    return G
def from_cudf_edgelist(
    df,
    source="source",
    destination="destination",
    edge_attr=None,
    create_using=Graph,
    renumber=True,
):
    """
    Return a new graph created from the edge list representation. This function
    is added for NetworkX compatibility (this function is a RAPIDS version of
    NetworkX's from_pandas_edge_list()). This function does not support
    multiple source or destination columns. But does support renumbering
    Parameters
    ----------
    df : cudf.DataFrame
        This cudf.DataFrame contains columns storing edge source vertices,
        destination (or target following NetworkX's terminology) vertices, and
        (optional) weights.
    source : string or integer, optional (default='source')
        This is used to index the source column.
    destination : string or integer, optional (default='destination')
        This is used to index the destination (or target following NetworkX's
        terminology) column.
    edge_attr : string or integer, optional (default=None)
        This pointer can be ``None``. If not, this is used to index the weight
        column.
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    renumber : bool, optional (default=True)
        If source and destination indices are not in range 0 to V where V
        is number of vertices, renumber argument should be True.
    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G = cugraph.from_cudf_edgelist(M, source='0', destination='1',
    ...                                edge_attr='2')
    """
    # Resolve create_using (None, instance, or class) to a Graph object.
    if create_using is None:
        G = Graph()
    elif isinstance(create_using, Graph):
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif type(create_using) is type(Graph):
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    G.from_cudf_edgelist(
        df,
        source=source,
        destination=destination,
        edge_attr=edge_attr,
        renumber=renumber,
    )
    return G
def from_pandas_edgelist(
    df,
    source="source",
    destination="destination",
    edge_attr=None,
    create_using=Graph,
    renumber=True,
):
    """
    Initialize a graph from the edge list. It is an error to call this
    method on an initialized Graph object. Source argument is source
    column name and destination argument is destination column name.
    By default, renumbering is enabled to map the source and destination
    vertices into an index in the range [0, V) where V is the number
    of vertices. If the input vertices are a single column of integers
    in the range [0, V), renumbering can be disabled and the original
    external vertex ids will be used.
    If weights are present, edge_attr argument is the weights column name.
    Parameters
    ----------
    df : pandas.DataFrame
        A DataFrame that contains edge information
    source : str or array-like, optional (default='source')
        source column name or array of column names
    destination : str or array-like, optional (default='destination')
        destination column name or array of column names
    edge_attr : str or None, optional (default=None)
        the weights column name.
    renumber : bool, optional (default=True)
        Indicate whether or not to renumber the source and destination
        vertex IDs.
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    Returns
    -------
    G : cugraph.Graph
        Graph containing edges from the pandas edgelist
    Examples
    --------
    >>> # Download dataset from
    >>> # https://github.com/rapidsai/cugraph/datasets/...
    >>> df = pd.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                  header=None, names=["0", "1", "2"],
    ...                  dtype={"0": "int32", "1": "int32", "2": "float32"})
    >>> G = cugraph.Graph()
    >>> G.from_pandas_edgelist(df, source='0', destination='1',
    ...                        edge_attr='2', renumber=False)
    """
    # Resolve create_using (None, instance, or class) to a Graph object.
    if create_using is None:
        G = Graph()
    elif isinstance(create_using, Graph):
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif type(create_using) is type(Graph):
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    G.from_pandas_edgelist(
        df,
        source=source,
        destination=destination,
        edge_attr=edge_attr,
        renumber=renumber,
    )
    return G
def to_pandas_edgelist(G, source="src", destination="dst"):
    """
    Returns the graph edge list as a Pandas DataFrame.
    Parameters
    ----------
    G : cugraph.Graph
        Graph containing the edgelist.
    source : str or array-like, optional (default='src')
        source column name or array of column names
    destination : str or array-like, optional (default='dst')
        destination column name or array of column names
    Returns
    -------
    df : pandas.DataFrame
        pandas dataframe containing the edgelist as source and
        destination columns.
    """
    # Thin wrapper; the Graph method performs the conversion.
    pdf = G.to_pandas_edgelist(source=source, destination=destination)
    return pdf
def from_pandas_adjacency(df, create_using=Graph):
    """
    Initializes the graph from pandas adjacency matrix.

    Parameters
    ----------
    df : pandas.DataFrame
        A DataFrame that contains edge information
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    """
    if create_using is None:
        graph = Graph()
    elif isinstance(create_using, Graph):
        # An instance was passed: build a fresh graph of the same class,
        # preserving its directedness.
        graph = type(create_using)(directed=create_using.is_directed())
    elif type(create_using) is type(Graph):
        # A Graph class (or subclass) was passed: instantiate it directly.
        graph = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    graph.from_pandas_adjacency(df)
    return graph
def to_pandas_adjacency(G):
    """
    Returns the graph adjacency matrix as a Pandas DataFrame.
    The row indices denote source and column names denote destination.

    Parameters
    ----------
    G : cugraph.Graph
        Graph containing the adjacency matrix.
    """
    # Thin module-level wrapper; delegates entirely to the Graph method.
    return G.to_pandas_adjacency()
def from_numpy_array(A, create_using=Graph):
    """
    Initializes the graph from numpy array containing adjacency matrix.

    Parameters
    ----------
    A : numpy.array
        A Numpy array that contains adjacency information
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    """
    if create_using is None:
        graph = Graph()
    elif isinstance(create_using, Graph):
        # An instance was passed: build a fresh graph of the same class,
        # preserving its directedness.
        graph = type(create_using)(directed=create_using.is_directed())
    elif type(create_using) is type(Graph):
        # A Graph class (or subclass) was passed: instantiate it directly.
        graph = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    graph.from_numpy_array(A)
    return graph
def to_numpy_array(G):
    """
    Returns the graph adjacency matrix as a NumPy array.

    Parameters
    ----------
    G : cugraph.Graph
        Graph containing the adjacency matrix.
    """
    # Thin module-level wrapper; delegates entirely to the Graph method.
    return G.to_numpy_array()
def from_numpy_matrix(A, create_using=Graph):
    """
    Initializes the graph from numpy matrix containing adjacency matrix.

    Parameters
    ----------
    A : numpy.matrix
        A Numpy matrix that contains adjacency information
    create_using: cugraph.Graph (instance or class), optional (default=Graph)
        Specify the type of Graph to create. Can pass in an instance to create
        a Graph instance with specified 'directed' attribute.
    """
    if create_using is None:
        graph = Graph()
    elif isinstance(create_using, Graph):
        # An instance was passed: build a fresh graph of the same class,
        # preserving its directedness.
        graph = type(create_using)(directed=create_using.is_directed())
    elif type(create_using) is type(Graph):
        # A Graph class (or subclass) was passed: instantiate it directly.
        graph = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )
    graph.from_numpy_matrix(A)
    return graph
def to_numpy_matrix(G):
    """
    Returns the graph adjacency matrix as a NumPy matrix.

    Parameters
    ----------
    G : cugraph.Graph
        Graph containing the adjacency matrix.
    """
    # Thin module-level wrapper; delegates entirely to the Graph method.
    return G.to_numpy_matrix()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/CMakeLists.txt | # =============================================================================
# Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
# =============================================================================
# Cython sources in this directory to compile into extension modules.
set(cython_sources graph_primtypes_wrapper.pyx graph_primtypes.pyx utils_wrapper.pyx)
# Every module links against the core libcugraph C++ library.
set(linked_libraries cugraph::cugraph)
# Build one C++ extension module per .pyx file.  MODULE_PREFIX "structure_"
# keeps the CMake target names unique across the project; ASSOCIATED_TARGETS
# ties the modules to the cugraph target for install/packaging.
rapids_cython_create_modules(
  CXX
  SOURCE_FILES "${cython_sources}"
  LINKED_LIBRARIES "${linked_libraries}" MODULE_PREFIX structure_
  ASSOCIATED_TARGETS cugraph
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_primtypes.pxd | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport pair
from libcpp.vector cimport vector
from pylibraft.common.handle cimport *
from rmm._lib.device_buffer cimport device_buffer
# Declarations mirroring the legacy cugraph C++ graph types declared in
# cugraph/legacy/graph.hpp.  Template parameters used throughout:
#   VT = vertex id type, ET = edge id type, WT = edge weight type.
cdef extern from "cugraph/legacy/graph.hpp" namespace "cugraph::legacy":
    # Tri-state property flag (undefined / false / true).
    ctypedef enum PropType:
        PROP_UNDEF "cugraph::legacy::PROP_UNDEF"
        PROP_FALSE "cugraph::legacy::PROP_FALSE"
        PROP_TRUE "cugraph::legacy::PROP_TRUE"
    # Which degree to compute: in-degree, out-degree, or their sum.
    ctypedef enum DegreeDirection:
        DIRECTION_IN_PLUS_OUT "cugraph::legacy::DegreeDirection::IN_PLUS_OUT"
        DIRECTION_IN "cugraph::legacy::DegreeDirection::IN"
        DIRECTION_OUT "cugraph::legacy::DegreeDirection::OUT"
    # Structural properties of a graph instance.
    struct GraphProperties:
        bool directed
        bool weighted
        bool multigraph
        bool bipartite
        bool tree
        PropType has_negative_edges
    # Base class for non-owning graph views: raw device pointers plus sizes.
    cdef cppclass GraphViewBase[VT,ET,WT]:
        WT *edge_data
        handle_t *handle;
        GraphProperties prop
        VT number_of_vertices
        ET number_of_edges
        VT* local_vertices
        ET* local_edges
        VT* local_offsets
        void set_handle(handle_t*)
        void set_local_data(VT* local_vertices_, ET* local_edges_, VT* local_offsets_)
        void get_vertex_identifiers(VT *) const
        GraphViewBase(WT*,VT,ET)
    # Non-owning view of a graph in COO (edge-list) format.
    cdef cppclass GraphCOOView[VT,ET,WT](GraphViewBase[VT,ET,WT]):
        VT *src_indices
        VT *dst_indices
        void degree(ET *,DegreeDirection) const
        GraphCOOView()
        GraphCOOView(const VT *, const ET *, const WT *, size_t, size_t)
    # Shared base for compressed-sparse views (CSR and CSC).
    cdef cppclass GraphCompressedSparseBaseView[VT,ET,WT](GraphViewBase[VT,ET,WT]):
        ET *offsets
        VT *indices
        void get_source_indices(VT *) const
        void degree(ET *,DegreeDirection) const
        GraphCompressedSparseBaseView(const VT *, const ET *, const WT *, size_t, size_t)
    cdef cppclass GraphCSRView[VT,ET,WT](GraphCompressedSparseBaseView[VT,ET,WT]):
        GraphCSRView()
        GraphCSRView(const VT *, const ET *, const WT *, size_t, size_t)
    cdef cppclass GraphCSCView[VT,ET,WT](GraphCompressedSparseBaseView[VT,ET,WT]):
        GraphCSCView()
        GraphCSCView(const VT *, const ET *, const WT *, size_t, size_t)
    # Owning COO contents: device buffers released from a GraphCOO.
    cdef cppclass GraphCOOContents[VT,ET,WT]:
        VT number_of_vertices
        ET number_of_edges
        unique_ptr[device_buffer] src_indices
        unique_ptr[device_buffer] dst_indices
        unique_ptr[device_buffer] edge_data
    # Owning COO graph; release() transfers buffer ownership to the caller.
    cdef cppclass GraphCOO[VT,ET,WT]:
        GraphCOO(
            VT nv,
            ET ne,
            bool has_data) except+
        GraphCOOContents[VT,ET,WT] release()
        GraphCOOView[VT,ET,WT] view()
    # Owning compressed-sparse contents shared by CSR and CSC graphs.
    cdef cppclass GraphSparseContents[VT,ET,WT]:
        VT number_of_vertices
        ET number_of_edges
        unique_ptr[device_buffer] offsets
        unique_ptr[device_buffer] indices
        unique_ptr[device_buffer] edge_data
    cdef cppclass GraphCSC[VT,ET,WT]:
        GraphCSC(
            VT nv,
            ET ne,
            bool has_data) except+
        GraphSparseContents[VT,ET,WT] release()
        GraphCSCView[VT,ET,WT] view()
    cdef cppclass GraphCSR[VT,ET,WT]:
        GraphCSR(
            VT nv,
            ET ne,
            bool has_data) except+
        GraphSparseContents[VT,ET,WT] release()
        GraphCSRView[VT,ET,WT] view()
# std::move overloads used to transfer ownership of graph containers and
# RMM device buffers across the Cython/C++ boundary without copying.
cdef extern from "<utility>" namespace "std" nogil:
    cdef unique_ptr[GraphCOO[int,int,float]] move(unique_ptr[GraphCOO[int,int,float]])
    cdef unique_ptr[GraphCOO[int,int,double]] move(unique_ptr[GraphCOO[int,int,double]])
    cdef GraphCOOContents[int,int,float] move(GraphCOOContents[int,int,float])
    cdef GraphCOOContents[int,int,double] move(GraphCOOContents[int,int,double])
    cdef device_buffer move(device_buffer)
    cdef unique_ptr[device_buffer] move(unique_ptr[device_buffer])
    cdef unique_ptr[GraphCSR[int,int,float]] move(unique_ptr[GraphCSR[int,int,float]])
    cdef unique_ptr[GraphCSR[int,int,double]] move(unique_ptr[GraphCSR[int,int,double]])
    cdef GraphSparseContents[int,int,float] move(GraphSparseContents[int,int,float])
    cdef GraphSparseContents[int,int,double] move(GraphSparseContents[int,int,double])
# Convenience aliases for the int32-vertex / int32-edge specializations.
ctypedef unique_ptr[GraphCOO[int,int,float]] GraphCOOPtrFloat
ctypedef unique_ptr[GraphCOO[int,int,double]] GraphCOOPtrDouble
# Fused types: one implementation handles both float and double weights.
ctypedef fused GraphCOOPtrType:
    GraphCOOPtrFloat
    GraphCOOPtrDouble
ctypedef unique_ptr[GraphCSR[int,int,float]] GraphCSRPtrFloat
ctypedef unique_ptr[GraphCSR[int,int,double]] GraphCSRPtrDouble
ctypedef fused GraphCSRPtrType:
    GraphCSRPtrFloat
    GraphCSRPtrDouble
ctypedef GraphCOOView[int,int,float] GraphCOOViewFloat
ctypedef GraphCOOView[int,int,double] GraphCOOViewDouble
ctypedef GraphCSRView[int,int,float] GraphCSRViewFloat
ctypedef GraphCSRView[int,int,double] GraphCSRViewDouble
# Helper declarations implemented in graph_primtypes.pyx; `=*` marks a
# default value supplied in the implementation file.
cdef move_device_buffer_to_column(unique_ptr[device_buffer] device_buffer_unique_ptr, dtype)
cdef move_device_buffer_to_series(unique_ptr[device_buffer] device_buffer_unique_ptr, dtype, series_name)
cdef coo_to_df(GraphCOOPtrType graph)
cdef csr_to_series(GraphCSRPtrType graph)
cdef GraphCOOViewFloat get_coo_float_graph_view(input_graph, bool weighted=*)
cdef GraphCOOViewDouble get_coo_double_graph_view(input_graph, bool weighted=*)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/hypergraph.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2015, Graphistry, Inc.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Graphistry, Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL Graphistry, Inc BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cudf
import numpy as np
from cugraph.structure.graph_classes import Graph
def hypergraph(
    values,
    columns=None,
    dropna=True,
    direct=False,
    graph_class=Graph,
    categories=None,
    drop_edge_attrs=False,
    categorical_metadata=True,
    SKIP=None,
    EDGES=None,
    DELIM="::",
    SOURCE="src",
    TARGET="dst",
    WEIGHTS=None,
    NODEID="node_id",
    EVENTID="event_id",
    ATTRIBID="attrib_id",
    CATEGORY="category",
    NODETYPE="node_type",
    EDGETYPE="edge_type",
):
    """
    Creates a hypergraph out of the given dataframe, returning the graph
    components as dataframes. The transform reveals relationships between the
    rows and unique values. This transform is useful for lists of events,
    samples, relationships, and other structured high-dimensional data.

    The transform creates a node for every row, and turns a row's column
    entries into node attributes. If direct=False (default), every unique
    value within a column is also turned into a node. Edges are added to
    connect a row's nodes to each of its column nodes, or if direct=True, to
    one another. Nodes are given the attribute specified by NODETYPE
    that corresponds to the originating column name, or if a row EVENTID.

    Consider a list of events. Each row represents a distinct event, and each
    column some metadata about an event. If multiple events have common
    metadata, they will be transitively connected through those metadata
    values. Conversely, if an event has unique metadata, the unique metadata
    will turn into nodes that only have connections to the event node.

    For best results, set EVENTID to a row's unique ID, SKIP to all
    non-categorical columns (or columns to all categorical columns),
    and categories to group columns with the same kinds of values.

    Parameters
    ----------
    values : cudf.DataFrame
        The input Dataframe to transform into a hypergraph.
    columns : sequence, optional (default=None)
        An optional sequence of column names to process.
    dropna : bool, optional (default=True)
        If True, do not include "null" values in the graph.
    direct : bool, optional (default=False)
        If True, omit hypernodes and instead strongly connect nodes for each
        row with each other.
    graph_class : cugraph.Graph, optional (default=cugraph.Graph)
        Specify the type of Graph to create.
    categories : dict, optional (default=None)
        Dictionary mapping column names to distinct categories. If the same
        value appears in columns mapped to the same category, the transform
        will generate one node for it, instead of one for each column.
        None (the default) is treated as an empty mapping.
    drop_edge_attrs : bool, optional, (default=False)
        If True, exclude each row's attributes from its edges
    categorical_metadata : bool, optional (default=True)
        Whether to use cudf.CategoricalDtype for the ``CATEGORY``,
        ``NODETYPE``, and ``EDGETYPE`` columns. These columns are typically
        large string columns with low cardinality, and using categorical
        dtypes can save a significant amount of memory.
    SKIP : sequence, optional
        A sequence of column names not to transform into nodes.
    EDGES : dict, optional
        When ``direct=True``, select column pairs instead of making all edges.
    DELIM : str, optional (default="::")
        The delimiter to use when joining column names, categories, and ids.
    SOURCE : str, optional (default="src")
        The name to use as the source column in the graph and edge DF.
    TARGET : str, optional (default="dst")
        The name to use as the target column in the graph and edge DF.
    WEIGHTS : str, optional (default=None)
        The column name from the input DF to map as the graph's edge weights.
    NODEID : str, optional (default="node_id")
        The name to use as the node id column in the graph and node DFs.
    EVENTID : str, optional (default="event_id")
        The name to use as the event id column in the graph and node DFs.
    ATTRIBID : str, optional (default="attrib_id")
        The name to use as the attribute id column in the graph and node DFs.
    CATEGORY : str, optional (default "category")
        The name to use as the category column in the graph and DFs.
    NODETYPE : str, optional (default="node_type")
        The name to use as the node type column in the graph and node DFs.
    EDGETYPE : str, optional (default="edge_type")
        The name to use as the edge type column in the graph and edge DF.

    Returns
    -------
    result : dict {"nodes", "edges", "graph", "events", "entities"}

        nodes : cudf.DataFrame
            A DataFrame of found entity and hyper node attributes.
        edges : cudf.DataFrame
            A DataFrame of edge attributes.
        graph : cugraph.Graph
            A Graph of the found entity nodes, hyper nodes, and edges.
        events : cudf.DataFrame
            If direct=True, a DataFrame of hyper node attributes, else empty.
        entities : cudf.DataFrame
            A DataFrame of the found entity node attributes.

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   names=['src', 'dst', 'weights'],
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> hg = cugraph.hypergraph(M)
    >>> nodes, edges, G = hg["nodes"], hg["edges"], hg["graph"]
    """

    # Avoid the shared-mutable-default pitfall; None means "no grouping".
    if categories is None:
        categories = dict()

    columns = values.columns if columns is None else columns
    columns = sorted(
        list(columns if SKIP is None else [x for x in columns if x not in SKIP])
    )

    # Shallow copy so the caller's dataframe is not reindexed in place.
    events = values.copy(deep=False)
    events.reset_index(drop=True, inplace=True)

    if EVENTID not in events.columns:
        events[EVENTID] = cudf.core.index.RangeIndex(len(events))

    # Namespace event ids (e.g. "event_id::0") so they cannot collide with
    # entity node ids, which are namespaced by category.
    events[EVENTID] = _prepend_str(events[EVENTID], EVENTID + DELIM)
    events[NODETYPE] = (
        "event"
        if not categorical_metadata
        else _str_scalar_to_category(len(events), "event")
    )

    if not dropna:
        # Keep null string values as explicit "null" nodes.
        for key, col in events[columns].items():
            if cudf.api.types.is_string_dtype(col.dtype):
                events[key].fillna("null", inplace=True)

    edges = None
    nodes = None
    entities = _create_entity_nodes(
        events,
        columns,
        dropna=dropna,
        categories=categories,
        categorical_metadata=categorical_metadata,
        DELIM=DELIM,
        NODEID=NODEID,
        CATEGORY=CATEGORY,
        NODETYPE=NODETYPE,
    )

    if direct:
        # Entity-to-entity edges; no hyper nodes are materialized.
        edges = _create_direct_edges(
            events,
            columns,
            dropna=dropna,
            edge_shape=EDGES,
            categories=categories,
            drop_edge_attrs=drop_edge_attrs,
            categorical_metadata=categorical_metadata,
            DELIM=DELIM,
            SOURCE=SOURCE,
            TARGET=TARGET,
            EVENTID=EVENTID,
            CATEGORY=CATEGORY,
            EDGETYPE=EDGETYPE,
            NODETYPE=NODETYPE,
        )
        nodes = entities
        events = cudf.DataFrame()
    else:
        # Hyper edges connect attribute nodes to their event (hyper) node.
        SOURCE = ATTRIBID
        TARGET = EVENTID
        edges = _create_hyper_edges(
            events,
            columns,
            dropna=dropna,
            categories=categories,
            drop_edge_attrs=drop_edge_attrs,
            categorical_metadata=categorical_metadata,
            DELIM=DELIM,
            EVENTID=EVENTID,
            ATTRIBID=ATTRIBID,
            CATEGORY=CATEGORY,
            EDGETYPE=EDGETYPE,
            NODETYPE=NODETYPE,
        )
        # Concatenate regular nodes and hyper nodes
        events = _create_hyper_nodes(
            events,
            NODEID=NODEID,
            EVENTID=EVENTID,
            CATEGORY=CATEGORY,
            NODETYPE=NODETYPE,
            categorical_metadata=categorical_metadata,
        )
        nodes = cudf.concat([entities, events])
        nodes.reset_index(drop=True, inplace=True)

    if WEIGHTS is not None:
        if WEIGHTS not in edges:
            WEIGHTS = None
        else:
            edges[WEIGHTS].fillna(0, inplace=True)

    graph = graph_class()
    graph.from_cudf_edgelist(
        edges,
        # force using renumber_from_cudf
        source=[SOURCE],
        destination=[TARGET],
        edge_attr=WEIGHTS,
        renumber=True,
    )

    return {
        "nodes": nodes,
        "edges": edges,
        "graph": graph,
        "events": events,
        "entities": entities,
    }
def _create_entity_nodes(
    events,
    columns,
    dropna=True,
    categorical_metadata=False,
    categories=None,
    DELIM="::",
    NODEID="node_id",
    CATEGORY="category",
    NODETYPE="node_type",
):
    """
    Build one node per unique value found in each of ``columns``.

    Returns a cudf.DataFrame with columns NODEID/NODETYPE/CATEGORY plus the
    original attribute columns; node ids are namespaced as
    ``<category><DELIM><value>`` and de-duplicated across columns that share
    a category.
    """
    # Avoid the shared-mutable-default pitfall.
    if categories is None:
        categories = dict()
    # Seed the list with an empty, fully-typed frame so the final concat
    # keeps a stable schema even when no column yields any nodes.
    nodes = [
        cudf.DataFrame(
            dict(
                [
                    (NODEID, cudf.core.column.column_empty(0, "str")),
                    (
                        CATEGORY,
                        cudf.core.column.column_empty(
                            0, "str" if not categorical_metadata else _empty_cat_dt()
                        ),
                    ),
                    (
                        NODETYPE,
                        cudf.core.column.column_empty(
                            0, "str" if not categorical_metadata else _empty_cat_dt()
                        ),
                    ),
                ]
                + [
                    (key, cudf.core.column.column_empty(0, col.dtype))
                    for key, col in events[columns].items()
                ]
            )
        )
    ]
    for key, col in events[columns].items():
        cat = categories.get(key, key)
        col = col.unique().sort_values()
        col = col.nans_to_nulls().dropna() if dropna else col
        if len(col) == 0:
            continue
        df = cudf.DataFrame(
            {
                key: cudf.core.column.as_column(col),
                NODEID: _prepend_str(col, cat + DELIM),
                CATEGORY: cat
                if not categorical_metadata
                else _str_scalar_to_category(len(col), cat),
                NODETYPE: key
                if not categorical_metadata
                else _str_scalar_to_category(len(col), key),
            }
        )
        df.reset_index(drop=True, inplace=True)
        nodes.append(df)

    nodes = cudf.concat(nodes)
    # Columns mapped to the same category can produce the same node id.
    nodes = nodes.drop_duplicates(subset=[NODEID])
    nodes = nodes[[NODEID, NODETYPE, CATEGORY] + list(columns)]
    nodes.reset_index(drop=True, inplace=True)

    return nodes
def _create_hyper_nodes(
    events,
    categorical_metadata=False,
    NODEID="node_id",
    EVENTID="event_id",
    CATEGORY="category",
    NODETYPE="node_type",
):
    """
    Build one hyper node per event row, carrying the event's attributes and
    using the (already namespaced) event id as the node id.
    """
    nodes = events.copy(deep=False)
    # Remove any stale metadata columns before regenerating them.
    for stale in (NODEID, NODETYPE, CATEGORY):
        if stale in nodes:
            nodes.drop(columns=[stale], inplace=True)
    if categorical_metadata:
        nodes[NODETYPE] = _str_scalar_to_category(len(nodes), EVENTID)
        nodes[CATEGORY] = _str_scalar_to_category(len(nodes), "event")
    else:
        nodes[NODETYPE] = EVENTID
        nodes[CATEGORY] = "event"
    nodes[NODEID] = nodes[EVENTID]
    nodes.reset_index(drop=True, inplace=True)
    return nodes
def _create_hyper_edges(
    events,
    columns,
    dropna=True,
    categories=None,
    drop_edge_attrs=False,
    categorical_metadata=False,
    DELIM="::",
    EVENTID="event_id",
    ATTRIBID="attrib_id",
    CATEGORY="category",
    EDGETYPE="edge_type",
    NODETYPE="node_type",
):
    """
    Build edges connecting each attribute-value node (ATTRIBID) to its
    event's hyper node (EVENTID), one edge per non-null cell in ``columns``.
    """
    # Avoid the shared-mutable-default pitfall.
    if categories is None:
        categories = dict()
    edge_attrs = [x for x in events.columns if x != NODETYPE]
    # Seed with an empty, fully-typed frame so the final concat keeps a
    # stable schema even when no column yields any edges.
    edges = [
        cudf.DataFrame(
            dict(
                (
                    [
                        (EVENTID, cudf.core.column.column_empty(0, "str")),
                        (ATTRIBID, cudf.core.column.column_empty(0, "str")),
                        (
                            EDGETYPE,
                            cudf.core.column.column_empty(
                                0,
                                "str" if not categorical_metadata else _empty_cat_dt(),
                            ),
                        ),
                    ]
                )
                + (
                    []
                    if len(categories) == 0
                    else [
                        (
                            CATEGORY,
                            cudf.core.column.column_empty(
                                0,
                                "str" if not categorical_metadata else _empty_cat_dt(),
                            ),
                        )
                    ]
                )
                + (
                    []
                    if drop_edge_attrs
                    else [
                        (key, cudf.core.column.column_empty(0, col.dtype))
                        for key, col in events[edge_attrs].items()
                    ]
                )
            )
        )
    ]

    for key, col in events[columns].items():
        cat = categories.get(key, key)
        fs = [EVENTID] + ([key] if drop_edge_attrs else edge_attrs)
        df = events[fs].dropna(subset=[key]) if dropna else events[fs]
        if len(df) == 0:
            continue
        if len(categories) > 0:
            df[CATEGORY] = (
                key
                if not categorical_metadata
                else _str_scalar_to_category(len(df), key)
            )
        df[EDGETYPE] = (
            cat if not categorical_metadata else _str_scalar_to_category(len(df), cat)
        )
        df[ATTRIBID] = _prepend_str(col, cat + DELIM)
        df.reset_index(drop=True, inplace=True)
        edges.append(df)

    columns = [EVENTID, EDGETYPE, ATTRIBID]

    if len(categories) > 0:
        columns += [CATEGORY]

    if not drop_edge_attrs:
        columns += edge_attrs

    edges = cudf.concat(edges)[columns]
    edges.reset_index(drop=True, inplace=True)
    return edges
def _create_direct_edges(
    events,
    columns,
    dropna=True,
    categories=None,
    edge_shape=None,
    drop_edge_attrs=False,
    categorical_metadata=False,
    DELIM="::",
    SOURCE="src",
    TARGET="dst",
    EVENTID="event_id",
    CATEGORY="category",
    EDGETYPE="edge_type",
    NODETYPE="node_type",
):
    """
    Build entity-to-entity edges, one per row per selected column pair.

    ``edge_shape`` maps a column name to the column names it connects to;
    when None, every unordered pair of columns is connected once.
    """
    # Avoid the shared-mutable-default pitfall.
    if categories is None:
        categories = dict()
    if edge_shape is None:
        # Default: connect every unordered pair of columns exactly once.
        edge_shape = {}
        for i, name in enumerate(columns):
            edge_shape[name] = columns[(i + 1) :]
    else:
        # Shallow-copy so the normalization below does not mutate the
        # caller's EDGES dict.
        edge_shape = dict(edge_shape)

    edge_attrs = [x for x in events.columns if x != NODETYPE]
    # Seed with an empty, fully-typed frame so the final concat keeps a
    # stable schema even when no column pair yields any edges.
    edges = [
        cudf.DataFrame(
            dict(
                (
                    [
                        (EVENTID, cudf.core.column.column_empty(0, "str")),
                        (SOURCE, cudf.core.column.column_empty(0, "str")),
                        (TARGET, cudf.core.column.column_empty(0, "str")),
                        (
                            EDGETYPE,
                            cudf.core.column.column_empty(
                                0,
                                "str" if not categorical_metadata else _empty_cat_dt(),
                            ),
                        ),
                    ]
                )
                + (
                    []
                    if len(categories) == 0
                    else [
                        (
                            CATEGORY,
                            cudf.core.column.column_empty(
                                0,
                                "str" if not categorical_metadata else _empty_cat_dt(),
                            ),
                        )
                    ]
                )
                + (
                    []
                    if drop_edge_attrs
                    else [
                        (key, cudf.core.column.column_empty(0, col.dtype))
                        for key, col in events[edge_attrs].items()
                    ]
                )
            )
        )
    ]

    for key1, col1 in events[sorted(edge_shape.keys())].items():
        cat1 = categories.get(key1, key1)

        # Normalize the pair spec to a list of target column names.
        if isinstance(edge_shape[key1], str):
            edge_shape[key1] = [edge_shape[key1]]
        elif isinstance(edge_shape[key1], dict):
            edge_shape[key1] = list(edge_shape[key1].keys())
        elif not isinstance(edge_shape[key1], (set, list, tuple)):
            raise ValueError("EDGES must be a dict of column name(s)")

        for key2, col2 in events[sorted(edge_shape[key1])].items():
            cat2 = categories.get(key2, key2)
            fs = [EVENTID] + ([key1, key2] if drop_edge_attrs else edge_attrs)
            # Both endpoints must be present for a direct edge.
            df = events[fs].dropna(subset=[key1, key2]) if dropna else events[fs]
            if len(df) == 0:
                continue
            if len(categories) > 0:
                df[CATEGORY] = (
                    key1 + DELIM + key2
                    if not categorical_metadata
                    else _str_scalar_to_category(len(df), key1 + DELIM + key2)
                )
            df[EDGETYPE] = (
                cat1 + DELIM + cat2
                if not categorical_metadata
                else _str_scalar_to_category(len(df), cat1 + DELIM + cat2)
            )
            df[SOURCE] = _prepend_str(col1, cat1 + DELIM)
            df[TARGET] = _prepend_str(col2, cat2 + DELIM)
            df.reset_index(drop=True, inplace=True)
            edges.append(df)

    columns = [EVENTID, EDGETYPE, SOURCE, TARGET]

    if len(categories) > 0:
        columns += [CATEGORY]

    if not drop_edge_attrs:
        columns += edge_attrs

    edges = cudf.concat(edges)[columns]
    edges.reset_index(drop=True, inplace=True)
    return edges
def _str_scalar_to_category(size, val):
    # Build a categorical column of length `size` where every row is the
    # single string category `val` (all codes are 0, no nulls).  Using a
    # categorical instead of a repeated string column saves memory for
    # these low-cardinality metadata columns.
    return cudf.core.column.build_categorical_column(
        categories=cudf.core.column.as_column([val], dtype="str"),
        codes=cudf.core.column.column.full(size, 0, dtype=np.int32),
        mask=None,
        size=size,
        offset=0,
        null_count=0,
        ordered=False,
    )
def _prepend_str(col, val):
return val + col.astype(str).fillna("null")
# Make an empty categorical string dtype
def _empty_cat_dt():
    # Used as the dtype of empty placeholder metadata columns so that later
    # concatenation with real categorical columns succeeds.
    return cudf.core.dtypes.CategoricalDtype(
        categories=np.array([], dtype="str"), ordered=False
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_utilities.pxd | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from libcpp cimport bool
from libcpp.memory cimport unique_ptr
from libcpp.utility cimport pair
from libcpp.vector cimport vector
from rmm._lib.device_buffer cimport device_buffer
from pylibraft.common.handle cimport handle_t
# std::move overloads used to transfer ownership of RMM device buffers
# across the Cython/C++ boundary without copying.
cdef extern from "<utility>" namespace "std" nogil:
    cdef device_buffer move(device_buffer)
    cdef unique_ptr[device_buffer] move(unique_ptr[device_buffer])
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/number_map.py | # Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections.abc import Iterable
import dask_cudf
import numpy as np
import cudf
import warnings
class NumberMap:
    class SingleGPU:
        """
        Single-GPU renumbering implementation backed by a cudf.DataFrame
        mapping external vertex columns to an internal integer "id" column.
        """

        def __init__(self, df, src_col_names, dst_col_names, id_type, store_transposed):
            # Internal (canonical) column names derived from the source names.
            self.col_names = NumberMap.compute_vals(src_col_names)
            # FIXME: rename the next two attributes to their singular counterpart as
            # there is only one 'src' and 'dst' col name
            self.src_col_names = src_col_names
            self.dst_col_names = dst_col_names
            self.df = df
            self.id_type = id_type
            self.store_transposed = store_transposed
            # Set to True once indirection_map has populated self.df.
            self.numbered = False

        def to_internal_vertex_id(self, df, col_names):
            """Return the internal ids for the external vertices in df[col_names],
            in the same row order as df."""
            tmp_df = df[col_names].rename(
                columns=dict(zip(col_names, self.col_names)), copy=False
            )
            # Remember the original row order so it can be restored after merge.
            index_name = NumberMap.generate_unused_column_name(df.columns)
            tmp_df[index_name] = tmp_df.index
            return (
                self.df.merge(tmp_df, on=self.col_names, how="right")
                .sort_values(index_name)
                .drop(columns=[index_name])
                .reset_index()["id"]
            )

        def from_internal_vertex_id(
            self, df, internal_column_name, external_column_names
        ):
            """Join the external vertex columns onto df, keyed by its internal-id
            column; optionally rename them to external_column_names."""
            tmp_df = self.df.merge(
                df,
                right_on=internal_column_name,
                left_on="id",
                how="right",
            )
            if internal_column_name != "id":
                tmp_df = tmp_df.drop(columns=["id"])
            if external_column_names is None:
                return tmp_df
            else:
                return tmp_df.rename(
                    columns=dict(zip(self.col_names, external_column_names)),
                    copy=False,
                )

        def add_internal_vertex_id(
            self, df, id_column_name, col_names, drop, preserve_order
        ):
            """Attach an internal-id column (named id_column_name) to df, keyed by
            the external vertex columns col_names.  `drop` removes the external
            columns; `preserve_order` restores df's original row order."""
            ret = None

            if preserve_order:
                index_name = NumberMap.generate_unused_column_name(df.columns)
                tmp_df = df
                tmp_df[index_name] = tmp_df.index
            else:
                tmp_df = df

            # If df already has an "id" column, merge under a temporary name
            # to avoid a collision.
            if "id" in df.columns:
                id_name = NumberMap.generate_unused_column_name(tmp_df.columns)
                merge_df = self.df.rename(columns={"id": id_name}, copy=False)
            else:
                id_name = "id"
                merge_df = self.df

            if col_names is None:
                ret = merge_df.merge(tmp_df, on=self.col_names, how="right")
            elif col_names == self.col_names:
                ret = merge_df.merge(tmp_df, on=self.col_names, how="right")
            else:
                ret = merge_df.merge(
                    tmp_df,
                    right_on=col_names,
                    left_on=self.col_names,
                    how="right",
                ).drop(columns=self.col_names)

            if drop:
                ret = ret.drop(columns=col_names)

            ret = ret.rename(columns={id_name: id_column_name}, copy=False)

            if preserve_order:
                ret = ret.sort_values(index_name).reset_index(drop=True)
            return ret

        def indirection_map(self, df, src_col_names, dst_col_names):
            """Build the mapping dataframe: one row per unique vertex seen in the
            src (and optionally dst) columns, with a dense integer "id"."""
            # src_col_names and dst_col_names are lists
            tmp_df = cudf.DataFrame()

            # groupby().count() is used here to deduplicate vertex values.
            tmp = (
                df[src_col_names]
                .groupby(src_col_names)
                .count()
                .reset_index()
                .rename(
                    columns=dict(zip(src_col_names, self.col_names)),
                    copy=False,
                )
            )

            if dst_col_names is not None:
                tmp_dst = df[dst_col_names].groupby(dst_col_names).count().reset_index()
                # Need to have the same column names before both df can be
                # concat
                tmp_dst.columns = tmp.columns
                tmp_df = cudf.concat([tmp, tmp_dst])
            else:
                newname = self.col_names
                tmp_df = tmp[newname]

            # Deduplicate again across the src/dst union, then assign dense ids.
            tmp_df = tmp_df.groupby(self.col_names).count().reset_index()
            tmp_df["id"] = tmp_df.index.astype(self.id_type)
            self.df = tmp_df
            return tmp_df
    class MultiGPU:
        """
        Multi-GPU renumbering implementation backed by a dask_cudf.DataFrame
        mapping external vertex columns to an internal "global_id" column.
        """

        def __init__(
            self, ddf, src_col_names, dst_col_names, id_type, store_transposed
        ):
            # Internal (canonical) column names derived from the source names.
            self.col_names = NumberMap.compute_vals(src_col_names)
            self.src_col_names = src_col_names
            self.dst_col_names = dst_col_names
            self.val_types = NumberMap.compute_vals_types(ddf, src_col_names)
            self.val_types["count"] = np.int32
            self.id_type = id_type
            self.ddf = ddf
            self.store_transposed = store_transposed
            # Set to True once indirection_map has populated self.ddf.
            self.numbered = False

        def to_internal_vertex_id(self, ddf, col_names):
            """Return the internal global ids for the external vertices in
            ddf[col_names].  NOTE: unlike the single-GPU path, row order is
            not restored here."""
            tmp_ddf = ddf[col_names].rename(
                columns=dict(zip(col_names, self.col_names))
            )
            # Align dtypes with the mapping frame before the merge.
            for name in self.col_names:
                tmp_ddf[name] = tmp_ddf[name].astype(self.ddf[name].dtype)
            x = self.ddf.merge(
                tmp_ddf,
                on=self.col_names,
                how="right",
            )
            return x["global_id"]

        def from_internal_vertex_id(
            self, df, internal_column_name, external_column_names
        ):
            """Join the external vertex columns onto df, keyed by its internal
            global-id column; optionally rename them."""
            tmp_df = self.ddf.merge(
                df, right_on=internal_column_name, left_on="global_id", how="right"
            ).map_partitions(lambda df: df.drop(columns="global_id"))

            if external_column_names is None:
                return tmp_df
            else:
                return tmp_df.map_partitions(
                    lambda df: df.rename(
                        columns=dict(zip(self.col_names, external_column_names)),
                        copy=False,
                    )
                )

        def add_internal_vertex_id(
            self, ddf, id_column_name, col_names, drop, preserve_order
        ):
            """Attach an internal global-id column (named id_column_name) to ddf,
            keyed by the external vertex columns col_names."""
            # At the moment, preserve_order cannot be done on
            # multi-GPU
            if preserve_order:
                raise Exception("preserve_order not supported for multi-GPU")

            ret = None
            if col_names is None:
                ret = self.ddf.merge(ddf, on=self.col_names, how="right")
            elif col_names == self.col_names:
                ret = self.ddf.merge(ddf, on=col_names, how="right")
            else:
                ret = self.ddf.merge(
                    ddf, right_on=col_names, left_on=self.col_names
                ).map_partitions(lambda df: df.drop(columns=self.col_names))

            if drop:
                ret = ret.map_partitions(lambda df: df.drop(columns=col_names))

            ret = ret.map_partitions(
                lambda df: df.rename(columns={"global_id": id_column_name}, copy=False)
            )

            return ret

        def indirection_map(self, ddf, src_col_names, dst_col_names):
            """Build the distributed mapping frame: one row per unique vertex
            with a dense integer "global_id"."""
            # groupby().count() is used here to deduplicate vertex values.
            tmp = (
                ddf[src_col_names]
                .groupby(src_col_names)
                .count()
                .reset_index()
                .rename(
                    columns=dict(zip(src_col_names, self.col_names)),
                )
            )

            if dst_col_names is not None:
                tmp_dst = (
                    ddf[dst_col_names].groupby(dst_col_names).count().reset_index()
                )
                tmp_dst.columns = tmp.columns
                tmp_df = dask_cudf.concat([tmp, tmp_dst])
            else:
                newname = self.col_names
                tmp_df = tmp[newname]

            # Deduplicate across the src/dst union.
            tmp_ddf = tmp_df.groupby(self.col_names).count().reset_index()

            # Set global index
            tmp_ddf = tmp_ddf.assign(idx=1)
            # ensure the original vertex and the 'global_id' columns are
            # of the same type unless the original vertex type is 'string'
            tmp_ddf["global_id"] = tmp_ddf.idx.cumsum().astype(self.id_type) - 1
            tmp_ddf = tmp_ddf.drop(columns="idx")

            tmp_ddf = tmp_ddf.persist()
            self.ddf = tmp_ddf
            return tmp_ddf
    def __init__(
        self,
        renumber_id_type=np.int32,
        unrenumbered_id_type=np.int32,
        is_renumbered=False,
    ):
        """
        Create an empty NumberMap.

        Parameters
        ----------
        renumber_id_type : dtype, optional (default=np.int32)
            dtype used for the internal (renumbered) vertex ids.
        unrenumbered_id_type : dtype, optional (default=np.int32)
            dtype of the original (external) vertex ids, or None when the
            input columns had mixed dtypes.
        is_renumbered : bool, optional (default=False)
            True if the edgelist this map describes was actually renumbered.
        """
        self.implementation = None
        self.renumber_id_type = renumber_id_type
        self.unrenumbered_id_type = unrenumbered_id_type
        self.is_renumbered = is_renumbered
        # The default src/dst column names in the resulting renumbered
        # dataframe. These may be updated by the renumbering methods if the
        # input dataframe uses the default names.
        self.renumbered_src_col_name = "renumbered_src"
        self.renumbered_dst_col_name = "renumbered_dst"
        # This dataframe maps internal to external vertex IDs.
        # The column name 'id' contains the renumbered vertices and the other column(s)
        # contain the original vertices
        self.df_internal_to_external = None
        # Maps renumbered column names back to the original (external)
        # names; populated lazily by unrenumber().
        self.internal_to_external_col_names = {}
@staticmethod
def compute_vals_types(df, column_names):
"""
Helper function to compute internal column names and types
"""
return {str(i): df[column_names[i]].dtype for i in range(len(column_names))}
@staticmethod
def generate_unused_column_name(column_names, start_with_name="col"):
"""
Helper function to generate an unused column name
"""
name = start_with_name
counter = 2
while name in column_names:
name = f"{start_with_name}{counter}"
counter += 1
return name
@staticmethod
def compute_vals(column_names):
"""
Helper function to compute internal column names based on external
column names
"""
return [str(i) for i in range(len(column_names))]
def to_internal_vertex_id(self, df, col_names=None):
"""
Given a collection of external vertex ids, return the internal
vertex ids
Parameters
----------
df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
Contains a list of external vertex identifiers that will be
converted into internal vertex identifiers
col_names: (optional) list of strings
This list of 1 or more strings contain the names
of the columns that uniquely identify an external
vertex identifier
Returns
---------
vertex_ids : cudf.Series or dask_cudf.Series
The vertex identifiers. Note that to_internal_vertex_id
does not guarantee order or partitioning (in the case of
dask_cudf) of vertex ids. If order matters use
add_internal_vertex_id
"""
tmp_df = None
tmp_col_names = None
if type(df) is cudf.Series:
tmp_df = cudf.DataFrame()
tmp_df["0"] = df
tmp_col_names = ["0"]
elif type(df) is dask_cudf.Series:
tmp_df = df.to_frame()
tmp_col_names = tmp_df.columns
else:
tmp_df = df
tmp_col_names = col_names
reply = self.implementation.to_internal_vertex_id(tmp_df, tmp_col_names)
return reply
def add_internal_vertex_id(
self, df, id_column_name="id", col_names=None, drop=False, preserve_order=False
):
"""
Given a collection of external vertex ids, return the internal vertex
ids combined with the input data.
If a series-type input is provided then the series will be in a column
named '0'. Otherwise the input column names in the DataFrame will be
preserved.
Parameters
----------
df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
Contains a list of external vertex identifiers that will be
converted into internal vertex identifiers
id_column_name: string, optional (default="id")
The name to be applied to the column containing the id
col_names: list of strings, optional (default=None)
This list of 1 or more strings contain the names
of the columns that uniquely identify an external
vertex identifier
drop: boolean, optional (default=False)
If True, drop the column names specified in col_names from
the returned DataFrame.
preserve_order: boolean, optional (default=False)
If True, do extra sorting work to preserve the order
of the input DataFrame.
Returns
---------
df : cudf.DataFrame or dask_cudf.DataFrame
A DataFrame containing the input data (DataFrame or series)
with an additional column containing the internal vertex id.
Note that there is no guarantee of the order or partitioning
of elements in the returned DataFrame.
"""
tmp_df = None
tmp_col_names = None
can_drop = True
if type(df) is cudf.Series:
tmp_df = df.to_frame("0")
tmp_col_names = ["0"]
can_drop = False
elif type(df) is dask_cudf.Series:
tmp_df = df.to_frame("0")
tmp_col_names = ["0"]
can_drop = False
else:
tmp_df = df
if isinstance(col_names, list):
tmp_col_names = col_names
else:
tmp_col_names = [col_names]
return self.implementation.add_internal_vertex_id(
tmp_df, id_column_name, tmp_col_names, (drop and can_drop), preserve_order
)
def from_internal_vertex_id(
self,
df,
internal_column_name=None,
external_column_names=None,
drop=False,
):
"""
Given a collection of internal vertex ids, return a DataFrame of
the external vertex ids
Parameters
----------
df: cudf.DataFrame, cudf.Series, dask_cudf.DataFrame, dask_cudf.Series
A list of internal vertex identifiers that will be
converted into external vertex identifiers. If df is a series type
object it will be converted to a dataframe where the series is
in a column labeled 'id'. If df is a dataframe type object
then internal_column_name should identify which column corresponds
the the internal vertex id that should be converted
internal_column_name: string, optional (default=None)
Name of the column containing the internal vertex id.
If df is a series then this parameter is ignored. If df is
a DataFrame this parameter is required.
external_column_names: string or list of str, optional (default=None)
Name of the columns that define an external vertex id.
If not specified, columns will be labeled '0', '1,', ..., 'n-1'
drop: boolean, optional (default=False)
If True the internal column name will be dropped from the
DataFrame.
Returns
---------
df : cudf.DataFrame or dask_cudf.DataFrame
The original DataFrame columns exist unmodified. Columns
are added to the DataFrame to identify the external vertex
identifiers. If external_columns is specified, these names
are used as the names of the output columns. If external_columns
is not specifed the columns are labeled '0', ... 'n-1' based on
the number of columns identifying the external vertex identifiers.
"""
tmp_df = None
can_drop = True
if type(df) is cudf.Series:
tmp_df = df.to_frame("id")
internal_column_name = "id"
can_drop = False
elif type(df) is dask_cudf.Series:
tmp_df = df.to_frame("id")
internal_column_name = "id"
can_drop = False
else:
tmp_df = df
output_df = self.implementation.from_internal_vertex_id(
tmp_df, internal_column_name, external_column_names
)
if drop and can_drop:
return output_df.drop(columns=internal_column_name)
return output_df
    @staticmethod
    def renumber_and_segment(
        df,
        src_col_names,
        dst_col_names,
        preserve_order=False,
        store_transposed=False,
        legacy_renum_only=False,
    ):
        """
        Given an input dataframe with its column names, this function returns the
        renumbered dataframe (if renumbering occurred) along with a mapping from
        internal to external vertex IDs.

        Renumbering occurs when vertices are multi-column (src_col_names is a
        list) or when the vertex columns are not int32/int64. The parameter
        'preserve_order' ensures that the order of the edges is preserved
        during renumbering (single-GPU only).

        Returns
        -------
        (df, renumber_map) : the (possibly renumbered) dataframe and the
            NumberMap describing the renumbering.
        """
        if legacy_renum_only:
            warning_msg = (
                "The parameter 'legacy_renum_only' is deprecated and will be removed."
            )
            warnings.warn(warning_msg, DeprecationWarning)

        renumbered = False

        # For columns with mismatch dtypes, set the renumbered
        # id_type to either 'int32' or 'int64'
        if isinstance(src_col_names, list):
            vertex_col_names = src_col_names.copy()
        else:
            vertex_col_names = [src_col_names]
        if isinstance(dst_col_names, list):
            vertex_col_names += dst_col_names
        else:
            vertex_col_names += [dst_col_names]
        if df[vertex_col_names].dtypes.nunique() > 1:
            # can't determine the edgelist input type
            unrenumbered_id_type = None
        else:
            unrenumbered_id_type = df.dtypes[0]

        if np.int64 in list(df.dtypes):
            renumber_id_type = np.int64
        else:
            # renumber the edgelist to 'int32'
            renumber_id_type = np.int32

        # Renumbering occurs only if:
        # 1) The column names are lists (multi-column vertices)
        if isinstance(src_col_names, list):
            renumbered = True
        # 2) There are non-integer vertices
        elif not (
            df[src_col_names].dtype == np.int32 or df[src_col_names].dtype == np.int64
        ):
            renumbered = True

        renumber_map = NumberMap(renumber_id_type, unrenumbered_id_type, renumbered)
        renumber_map.input_src_col_names = src_col_names
        renumber_map.input_dst_col_names = dst_col_names
        # Normalize to lists so the implementations always receive lists.
        if not isinstance(renumber_map.input_src_col_names, list):
            src_col_names = [src_col_names]
            dst_col_names = [dst_col_names]

        # Assign the new src and dst column names to be used in the renumbered
        # dataframe to return (renumbered_src_col_name and
        # renumbered_dst_col_name)
        renumber_map.set_renumbered_col_names(src_col_names, dst_col_names, df.columns)

        # FIXME: Remove 'src_col_names' and 'dst_col_names' from this initialization as
        # those will capture 'simpleGraph.srcCol' and 'simpleGraph.dstCol'.
        # In fact the input src and dst col names are already captured in
        # 'renumber_map.input_src_col_names' and 'renumber_map.input_dst_col_names'.
        if isinstance(df, cudf.DataFrame):
            renumber_map.implementation = NumberMap.SingleGPU(
                df,
                src_col_names,
                dst_col_names,
                renumber_map.renumber_id_type,
                store_transposed,
            )
        elif isinstance(df, dask_cudf.DataFrame):
            renumber_map.implementation = NumberMap.MultiGPU(
                df,
                src_col_names,
                dst_col_names,
                renumber_map.renumber_id_type,
                store_transposed,
            )
        else:
            raise TypeError("df must be cudf.DataFrame or dask_cudf.DataFrame")

        if renumbered:
            # Build the internal<->external mapping table, then replace the
            # external vertex columns in df with internal ids.
            renumber_map.implementation.indirection_map(
                df, src_col_names, dst_col_names
            )
            if isinstance(df, dask_cudf.DataFrame):
                renumber_map.df_internal_to_external = renumber_map.implementation.ddf
            else:
                renumber_map.df_internal_to_external = renumber_map.implementation.df
            df = renumber_map.add_internal_vertex_id(
                df,
                renumber_map.renumbered_src_col_name,
                src_col_names,
                drop=True,
                preserve_order=preserve_order,
            )
            df = renumber_map.add_internal_vertex_id(
                df,
                renumber_map.renumbered_dst_col_name,
                dst_col_names,
                drop=True,
                preserve_order=preserve_order,
            )
        else:
            # Update the renumbered source and destination column name
            # with the original input's source and destination name
            renumber_map.renumbered_src_col_name = src_col_names[0]
            renumber_map.renumbered_dst_col_name = dst_col_names[0]

        return df, renumber_map
@staticmethod
def renumber(
df,
src_col_names,
dst_col_names,
preserve_order=False,
store_transposed=False,
legacy_renum_only=False,
):
return NumberMap.renumber_and_segment(
df,
src_col_names,
dst_col_names,
preserve_order,
store_transposed,
legacy_renum_only,
)[0:2]
    def unrenumber(self, df, column_name, preserve_order=False, get_column_names=False):
        """
        Given a DataFrame containing internal vertex ids in the identified
        column, replace this with external vertex ids. If the renumbering
        is from a single column, the output dataframe will use the same
        name for the external vertex identifiers. If the renumbering is from
        a multi-column input, the output columns will be labeled 0 through
        n-1 with a suffix of _column_name.

        Note that this function does not guarantee order or partitioning in
        multi-GPU mode.

        Parameters
        ----------
        df: cudf.DataFrame or dask_cudf.DataFrame
            A DataFrame containing internal vertex identifiers that will be
            converted into external vertex identifiers.

        column_name: string
            Name of the column containing the internal vertex id.

        preserve_order: bool, optional (default=False)
            If True, preserve the order of the rows in the output
            DataFrame to match the input DataFrame

        get_column_names: bool, optional (default=False)
            If True, the unrenumbered column names are returned.

        Returns
        ---------
        df : cudf.DataFrame or dask_cudf.DataFrame
            The original DataFrame columns exist unmodified. The external
            vertex identifiers are added to the DataFrame, the internal
            vertex identifier column is removed from the dataframe.

        column_names: string or list of strings
            If get_column_names is True, the unrenumbered column names are
            returned.

        Examples
        --------
        >>> from cugraph.structure import number_map
        >>> df = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
        ...                    dtype=['int32', 'int32', 'float32'],
        ...                    header=None)
        >>> df['0'] = df['0'].astype(str)
        >>> df['1'] = df['1'].astype(str)
        >>> df, number_map = number_map.NumberMap.renumber(df, '0', '1')
        >>> G = cugraph.Graph()
        >>> G.from_cudf_edgelist(df,
        ...                      number_map.renumbered_src_col_name,
        ...                      number_map.renumbered_dst_col_name)
        >>> pr = cugraph.pagerank(G, alpha = 0.85, max_iter = 500,
        ...                       tol = 1.0e-05)
        >>> pr = number_map.unrenumber(pr, 'vertex')

        """
        if len(self.implementation.col_names) == 1:
            # Output will be renamed to match input
            mapping = {"0": column_name}
            col_names = column_name
        else:
            # Output will be renamed to ${i}_${column_name}
            mapping = {}
            for nm in self.implementation.col_names:
                mapping[nm] = nm + "_" + column_name
            col_names = list(mapping.values())

        if isinstance(self.input_src_col_names, list):
            input_src_col_names = self.input_src_col_names.copy()
            input_dst_col_names = self.input_dst_col_names.copy()
        else:
            # Assuming the src and dst columns are of the same length
            # if they are lists.
            input_src_col_names = [self.input_src_col_names]
            input_dst_col_names = [self.input_dst_col_names]

        if not isinstance(col_names, list):
            col_names = [col_names]

        # Record which external name(s) each renumbered column maps back to,
        # depending on whether the column being unrenumbered is a src or dst.
        if column_name in [
            self.renumbered_src_col_name,
            self.implementation.src_col_names,
        ]:
            self.internal_to_external_col_names.update(
                dict(zip(col_names, input_src_col_names))
            )
        elif column_name in [
            self.renumbered_dst_col_name,
            self.implementation.dst_col_names,
        ]:
            self.internal_to_external_col_names.update(
                dict(zip(col_names, input_dst_col_names))
            )

        if len(self.implementation.col_names) == 1:
            col_names = col_names[0]

        if preserve_order:
            # Tag each row with its position so the original order can be
            # restored after the merge-based lookup.
            index_name = NumberMap.generate_unused_column_name(df)
            df[index_name] = df.index

        df = self.from_internal_vertex_id(df, column_name, drop=True)

        if preserve_order:
            df = (
                df.sort_values(index_name)
                .drop(columns=index_name)
                .reset_index(drop=True)
            )

        if type(df) is dask_cudf.DataFrame:
            df = df.map_partitions(lambda df: df.rename(columns=mapping, copy=False))
        else:
            df = df.rename(columns=mapping, copy=False)

        # FIXME: This parameter is not working as expected as it doesn't return
        # the unrenumbered column names: leverage 'self.internal_to_external_col_names'
        # instead.
        if get_column_names:
            return df, col_names
        else:
            return df
def vertex_column_size(self):
return len(self.implementation.col_names)
def set_renumbered_col_names(
self, src_col_names_to_replace, dst_col_names_to_replace, all_col_names
):
"""
Sets self.renumbered_src_col_name and self.renumbered_dst_col_name to
values that can be used to replace src_col_names_to_replace and
dst_col_names_to_replace to values that will not collide with any other
column names in all_col_names.
The new unique column names are generated using the existing
self.renumbered_src_col_name and self.renumbered_dst_col_name as
starting points.
"""
assert isinstance(src_col_names_to_replace, Iterable)
assert isinstance(dst_col_names_to_replace, Iterable)
assert isinstance(all_col_names, Iterable)
# No need to consider the col_names_to_replace when picking new unique
# names, since those names will be replaced anyway, and replacing a
# name with the same value is allowed.
reserved_col_names = set(all_col_names) - set(
src_col_names_to_replace + dst_col_names_to_replace
)
self.renumbered_src_col_name = self.generate_unused_column_name(
reserved_col_names, start_with_name=self.renumbered_src_col_name
)
self.renumbered_dst_col_name = self.generate_unused_column_name(
reserved_col_names, start_with_name=self.renumbered_dst_col_name
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/__init__.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.structure.graph_classes import (
Graph,
MultiGraph,
BiPartiteGraph,
)
from cugraph.structure.graph_classes import (
is_weighted,
is_directed,
is_multigraph,
is_bipartite,
is_multipartite,
)
from cugraph.structure.number_map import NumberMap
from cugraph.structure.symmetrize import symmetrize, symmetrize_df, symmetrize_ddf
from cugraph.structure.replicate_edgelist import (
replicate_edgelist,
replicate_cudf_dataframe,
replicate_cudf_series,
)
from cugraph.structure.convert_matrix import (
from_edgelist,
from_cudf_edgelist,
from_pandas_edgelist,
to_pandas_edgelist,
from_pandas_adjacency,
to_pandas_adjacency,
from_numpy_array,
to_numpy_array,
from_numpy_matrix,
to_numpy_matrix,
from_adjlist,
)
from cugraph.structure.hypergraph import hypergraph
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_primtypes_wrapper.pyx | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# cython: profile=False
# distutils: language = c++
# cython: embedsignature = True
# cython: language_level = 3
from cugraph.structure.graph_primtypes cimport *
from cugraph.structure.utils_wrapper import *
from libcpp cimport bool
import enum
from libc.stdint cimport uintptr_t
import dask_cudf as dc
import cugraph.dask.comms.comms as Comms
from dask.distributed import wait, default_client
from cugraph.dask.common.input_utils import DistributedDataHandler
import cudf
import numpy as np
def datatype_cast(cols, dtypes):
    """
    Return cols with each column cast to dtypes[0], unless the column is
    None or its dtype is already one of the accepted dtypes, in which case
    it is passed through unchanged.
    """
    return [
        col if (col is None or col.dtype.type in dtypes) else col.astype(dtypes[0])
        for col in cols
    ]
class Direction(enum.Enum):
    """Edge direction selector used by the degree helper functions."""
    ALL = 0
    IN = 1
    OUT = 2
def view_adj_list(input_graph):
    """
    Return (offsets, indices, weights) CSR arrays built from the graph's
    edgelist; weights is None for an unweighted graph.
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    if input_graph.adjlist is None:
        if input_graph.edgelist is None:
            raise ValueError('Graph is Empty')

    # NOTE(review): assumes the edgelist is populated here — confirm callers
    # only invoke this when adjlist is None and edgelist exists.
    [src, dst] = datatype_cast([input_graph.edgelist.edgelist_df['src'], input_graph.edgelist.edgelist_df['dst']], [np.int32])
    weights = None
    if input_graph.edgelist.weights:
        [weights] = datatype_cast([input_graph.edgelist.edgelist_df['weights']], [np.float32, np.float64])
    return coo2csr(src, dst, weights)
def view_transposed_adj_list(input_graph):
    """
    Return (offsets, indices, weights) for the transposed adjacency (CSC)
    built from the graph's edgelist: src/dst are swapped before conversion.
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    if input_graph.transposedadjlist is None:
        if input_graph.edgelist is None:
            if input_graph.adjlist is None:
                raise ValueError('Graph is Empty')
            else:
                # Materialize an edgelist from the existing adjacency list.
                input_graph.view_edge_list()

    [src, dst] = datatype_cast([input_graph.edgelist.edgelist_df['src'], input_graph.edgelist.edgelist_df['dst']], [np.int32])
    weights = None
    if input_graph.edgelist.weights:
        [weights] = datatype_cast([input_graph.edgelist.edgelist_df['weights']], [np.float32, np.float64])
    # Swapping dst/src yields the transposed CSR (i.e. CSC).
    return coo2csr(dst, src, weights)
def view_edge_list(input_graph):
    """
    Return (src_indices, indices, weights) COO arrays reconstructed from the
    graph's adjacency list (CSR).
    """
    if input_graph.adjlist is None:
        raise RuntimeError('Graph is Empty')
    [offsets, indices] = datatype_cast([input_graph.adjlist.offsets, input_graph.adjlist.indices], [np.int32])
    [weights] = datatype_cast([input_graph.adjlist.weights], [np.float32, np.float64])
    num_verts = input_graph.number_of_vertices()
    num_edges = input_graph.number_of_edges(directed_edges=True)

    cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0]
    cdef GraphCSRView[int,int,float] graph
    graph = GraphCSRView[int,int,float](<int*>c_offsets, <int*>c_indices, <float*>NULL, num_verts, num_edges)

    # Expand the CSR offsets into one source vertex id per edge.
    src_indices = cudf.Series(np.zeros(num_edges), dtype= indices.dtype)
    cdef uintptr_t c_src_indices = src_indices.__cuda_array_interface__['data'][0]
    graph.get_source_indices(<int*>c_src_indices)

    return src_indices, indices, weights
def _degree_coo(edgelist_df, src_name, dst_name, direction=Direction.ALL, num_verts=None, sID=None):
    #
    # Computing the degree of the input graph from COO
    #
    # Returns a (vertex, degree) pair of cudf.Series. When 'sID' is given,
    # the graph is attached to the comms handle for that session so the
    # degree computation is reduced across workers (multi-GPU).
    cdef DegreeDirection dir

    src = edgelist_df[src_name]
    dst = edgelist_df[dst_name]

    # Map the Python-level Direction enum to the C-level enum.
    if direction == Direction.ALL:
        dir = DIRECTION_IN_PLUS_OUT
    elif direction == Direction.IN:
        dir = DIRECTION_IN
    elif direction == Direction.OUT:
        dir = DIRECTION_OUT
    else:
        raise ValueError("direction should be 0, 1 or 2")

    [src, dst] = datatype_cast([src, dst], [np.int32])

    if num_verts is None:
        # Infer the vertex count from the largest endpoint id.
        num_verts = 1 + max(src.max(), dst.max())
    num_edges = len(src)

    # Output buffers filled in by the C++ graph view below.
    vertex_col = cudf.Series(np.zeros(num_verts, dtype=np.int32))
    degree_col = cudf.Series(np.zeros(num_verts, dtype=np.int32))

    cdef GraphCOOView[int,int,float] graph

    cdef uintptr_t c_vertex = vertex_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_degree = degree_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_src = src.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_dst = dst.__cuda_array_interface__['data'][0]

    graph = GraphCOOView[int,int,float](<int*>c_src, <int*>c_dst, <float*>NULL, num_verts, num_edges)

    cdef size_t handle_size_t
    if sID is not None:
        # Attach the raft handle for the comms session (multi-GPU reduce).
        handle = Comms.get_handle(sID)
        handle_size_t = <size_t>handle.getHandle()
        graph.set_handle(<handle_t*>handle_size_t)

    graph.degree(<int*> c_degree, dir)
    graph.get_vertex_identifiers(<int*>c_vertex)

    return vertex_col, degree_col
def _degree_csr(offsets, indices, direction=Direction.ALL):
    # Compute (vertex, degree) cudf.Series pairs from a CSR representation.
    cdef DegreeDirection dir

    # Map the Python-level Direction enum to the C-level enum.
    if direction == Direction.ALL:
        dir = DIRECTION_IN_PLUS_OUT
    elif direction == Direction.IN:
        dir = DIRECTION_IN
    elif direction == Direction.OUT:
        dir = DIRECTION_OUT
    else:
        raise ValueError("direction should be 0, 1 or 2")

    [offsets, indices] = datatype_cast([offsets, indices], [np.int32])

    # CSR has num_verts+1 offsets and one index entry per edge.
    num_verts = len(offsets)-1
    num_edges = len(indices)

    # Output buffers filled in by the C++ graph view below.
    vertex_col = cudf.Series(np.zeros(num_verts, dtype=np.int32))
    degree_col = cudf.Series(np.zeros(num_verts, dtype=np.int32))

    cdef GraphCSRView[int,int,float] graph

    cdef uintptr_t c_vertex = vertex_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_degree = degree_col.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_offsets = offsets.__cuda_array_interface__['data'][0]
    cdef uintptr_t c_indices = indices.__cuda_array_interface__['data'][0]

    graph = GraphCSRView[int,int,float](<int*>c_offsets, <int*>c_indices, <float*>NULL, num_verts, num_edges)

    graph.degree(<int*> c_degree, dir)
    graph.get_vertex_identifiers(<int*>c_vertex)

    return vertex_col, degree_col
def _mg_degree(input_graph, direction=Direction.ALL):
    """
    Compute vertex degrees for a multi-GPU (dask_cudf-backed) graph.

    Parameters
    ----------
    input_graph : distributed cugraph graph
        Graph whose renumbered distributed edgelist is used.
    direction : Direction, optional (default=Direction.ALL)
        Must be Direction.IN or Direction.OUT. Direction.ALL is not
        implemented here and raises ValueError.

    Returns
    -------
    (vertex, degree) : pair of cudf.Series
        Taken from the first worker's result; each worker computes the full
        global degree through the comms session, so any single result is
        complete.
    """
    # Fail fast with a clear error. Previously, any direction other than IN
    # or OUT (including the default Direction.ALL) fell through both 'if'
    # branches and raised NameError on the undefined 'degree_ddf'.
    if direction not in (Direction.IN, Direction.OUT):
        raise ValueError("_mg_degree only supports Direction.IN or Direction.OUT")

    if input_graph.edgelist is None:
        input_graph.compute_renumber_edge_list(transposed=False)

    # The edge list renumbering step gives the columns that were renumbered
    # potentially new unique names.
    src_col_name = input_graph.renumber_map.renumbered_src_col_name
    dst_col_name = input_graph.renumber_map.renumbered_dst_col_name

    input_ddf = input_graph.edgelist.edgelist_df

    # Get the total number of vertices by summing each partition's number of
    # vertices.
    num_verts = input_graph.renumber_map.implementation.ddf.\
        map_partitions(len).compute().sum()

    data = DistributedDataHandler.create(data=input_ddf)
    comms = Comms.get_comms()
    client = default_client()
    data.calculate_parts_to_sizes(comms)

    if direction == Direction.IN:
        first_col, second_col = src_col_name, dst_col_name
    else:
        # OUT-degree equals the IN-degree of the reversed graph, so swap the
        # src/dst columns and still request Direction.IN from _degree_coo.
        first_col, second_col = dst_col_name, src_col_name

    degree_ddf = [client.submit(_degree_coo,
                                parts[0],
                                first_col,
                                second_col,
                                Direction.IN,
                                num_verts,
                                comms.sessionId,
                                workers=[worker])
                  for worker, parts in data.worker_to_parts.items()]
    wait(degree_ddf)
    return degree_ddf[0].result()
def _degree(input_graph, direction=Direction.ALL):
    """
    Compute (vertex, degree) for a single-GPU graph from whichever
    representation is available: CSR adjacency, transposed CSR, or COO
    edgelist. Raises ValueError if none is present.
    """
    # FIXME: this function assumes columns named "src" and "dst" and can only
    # be used for SG graphs due to that assumption.
    if input_graph.adjlist is not None:
        return _degree_csr(input_graph.adjlist.offsets,
                           input_graph.adjlist.indices,
                           direction)

    if input_graph.transposedadjlist is not None:
        # On the transposed structure IN and OUT swap meaning; ALL is
        # unaffected.
        flipped = {Direction.ALL: Direction.ALL,
                   Direction.IN: Direction.OUT,
                   Direction.OUT: Direction.IN}[direction]
        return _degree_csr(input_graph.transposedadjlist.offsets,
                           input_graph.transposedadjlist.indices,
                           flipped)

    if input_graph.edgelist is not None:
        return _degree_coo(input_graph.edgelist.edgelist_df,
                           'src', 'dst', direction)

    raise ValueError("input_graph not COO, CSR or CSC")
def _degrees(input_graph):
    """Return (vertex, in_degree, out_degree) for a single-GPU graph."""
    _, in_deg = _degree(input_graph, Direction.IN)
    verts, out_deg = _degree(input_graph, Direction.OUT)
    return verts, in_deg, out_deg
def weight_type(input_graph):
    """
    Return the dtype of the graph's edge weights, or None when the edgelist
    is unweighted.
    """
    edgelist = input_graph.edgelist
    if not edgelist.weights:
        return None
    return edgelist.edgelist_df['weights'].dtype
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/property_graph.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cudf
import numpy as np
import cugraph
from cugraph.utilities.utils import (
import_optional,
MissingModule,
create_list_series_from_2d_ar,
)
from typing import Union
pd = import_optional("pandas")
_dataframe_types = [cudf.DataFrame]
if not isinstance(pd, MissingModule):
_dataframe_types.append(pd.DataFrame)
# FIXME: remove leading EXPERIMENTAL__ when no longer experimental
class EXPERIMENTAL__PropertySelection:
    """
    Holds the result of PropertyGraph.select_*() calls: boolean masks over
    the PropertyGraph's internal vertex and/or edge DataFrames. Instances
    are consumed by PropertyGraph.extract_subgraph() to build a Graph
    containing only the selected vertices and edges.
    """

    def __init__(self, vertex_selection_series=None, edge_selection_series=None):
        """
        Create a PropertySelection from up to two Series of booleans.

        Parameters
        ----------
        vertex_selection_series : cudf or pandas series, optional
            Booleans marking selected rows of the vertex DataFrame.
        edge_selection_series : cudf or pandas series, optional
            Booleans marking selected rows of the edge DataFrame.
        """
        self.vertex_selections = vertex_selection_series
        self.edge_selections = edge_selection_series

    def __add__(self, other):
        """
        Combine two selections, keeping this instance's masks where present
        and filling any missing mask from "other".

        Returns
        -------
        PropertySelection
            A new instance whose vertex/edge selections come from self when
            set, otherwise from other.
        """
        vertex_sel = (
            self.vertex_selections
            if self.vertex_selections is not None
            else other.vertex_selections
        )
        edge_sel = (
            self.edge_selections
            if self.edge_selections is not None
            else other.edge_selections
        )
        return EXPERIMENTAL__PropertySelection(vertex_sel, edge_sel)
# FIXME: remove leading EXPERIMENTAL__ when no longer experimental
class EXPERIMENTAL__PropertyGraph:
"""
Class which stores vertex and edge properties that can be used to construct
Graphs from individual property selections and used later to annotate graph
algorithm results with corresponding properties.
"""
# column name constants used in internal DataFrames
vertex_col_name = "_VERTEX_"
"""
Column containing the vertex id.
"""
src_col_name = "_SRC_"
"""
Column containing the id of the edge source
"""
dst_col_name = "_DST_"
"""
Column containing the id of the edge destination
"""
type_col_name = "_TYPE_"
"""
Column containing the type of the edge or vertex
"""
edge_id_col_name = "_EDGE_ID_"
"""
Column containing the edge identifier
"""
weight_col_name = "_WEIGHT_"
"""
Column containing the edge weight if the graph is weighted.
"""
_default_type_name = ""
    def __init__(self):
        """Construct an empty PropertyGraph with no vertices or edges."""
        # The dataframe containing the properties for each vertex.
        # Each vertex occupies a row, and individual properties are maintained
        # in individual columns. The table contains a column for each property
        # of each vertex. If a vertex does not contain a property, it will have
        # a NaN value in that property column. Each vertex will also have a
        # "type_name" that can be assigned by the caller to describe the type
        # of the vertex for a given application domain. If no type_name is
        # provided, the default type_name is "".
        # Example:
        # vertex | type_name | propA | propB | propC
        # ------------------------------------------
        #      3 | "user"    | 22    | NaN   | 11
        #     88 | "service" | NaN   | 3.14  | 21
        #      9 | ""        | NaN   | NaN   | 2
        self.__vertex_prop_dataframe = None

        # The dataframe containing the properties for each edge.
        # The description is identical to the vertex property dataframe, except
        # edges are identified by ordered pairs of vertices (src and dst).
        # Example:
        # src | dst | type_name | propA | propB | propC
        # ---------------------------------------------
        #   3 |  88 | "started" | 22    | NaN   | 11
        #  88 |   9 | "called"  | NaN   | 3.14  | 21
        #   9 |  88 | ""        | NaN   | NaN   | 2
        self.__edge_prop_dataframe = None

        # The var:value dictionaries used during evaluation of filter/query
        # expressions for vertices and edges. These dictionaries contain
        # entries for each column name in their respective dataframes which
        # are mapped to instances of PropertyColumn objects.
        #
        # When filter/query expressions are evaluated, PropertyColumn objects
        # are used in place of DataFrame columns in order to support string
        # comparisons when cuDF DataFrames are used. This approach also allows
        # expressions to contain var names that can be used in expressions that
        # are different than those in the actual internal tables, allowing for
        # the tables to contain additional or different column names than what
        # can be used in expressions.
        #
        # Example: "type_name == 'user' & propC > 10"
        #
        # The above would be evaluated and "type_name" and "propC" would be
        # PropertyColumn instances which support specific operators used in
        # queries.
        self.__vertex_prop_eval_dict = {}
        self.__edge_prop_eval_dict = {}

        # The types used for DataFrames and Series, typically Pandas (for host
        # storage) or cuDF (device storage), but this need not strictly be one
        # of those if the type supports the Pandas-like API. These are used for
        # constructing other DataFrames and Series of the same type, as well as
        # for enforing that both vertex and edge properties are the same type.
        self.__dataframe_type = None
        self.__series_type = None

        # The dtypes for each column in each DataFrame. This is required since
        # merge operations can often change the dtypes to accommodate NaN
        # values (eg. int64 to float64, since NaN is a float).
        self.__vertex_prop_dtypes = {}
        self.__edge_prop_dtypes = {}

        # Lengths of the properties that are vectors
        self.__vertex_vector_property_lengths = {}
        self.__edge_vector_property_lengths = {}

        # Add unique edge IDs to the __edge_prop_dataframe by simply
        # incrementing this counter. Remains None if user provides edge IDs.
        self.__last_edge_id = None

        # Are edge IDs automatically generated sequentially by PG (True),
        # provided by the user (False), or no edges added yet (None).
        self.__is_edge_id_autogenerated = None

        # Cached property values
        self.__num_vertices = None
        self.__vertex_type_value_counts = None
        self.__edge_type_value_counts = None
    def _build_from_components(
        self,
        *,
        vertex_prop_dataframe,
        edge_prop_dataframe,
        dataframe_type,
        series_type,
        vertex_prop_dtypes,
        edge_prop_dtypes,
        vertex_vector_property_lengths,
        edge_vector_property_lengths,
        last_edge_id,
        is_edge_id_autogenerated,
        # Computable
        vertex_prop_eval_dict=None,
        edge_prop_eval_dict=None,
        # Cached properties
        num_vertices=None,
        vertex_type_value_counts=None,
        edge_type_value_counts=None,
    ):
        """Backdoor to populate a PropertyGraph from existing data.
        Use only if you know what you're doing.

        All keyword-only arguments are assigned directly to the corresponding
        private attributes. The eval dicts (used for query/filter expression
        evaluation) are recomputed from the given dataframes when not
        supplied; the cached values (num_vertices and the type value counts)
        default to None, which means "recompute lazily on first access".
        """
        self.__vertex_prop_dataframe = vertex_prop_dataframe
        self.__edge_prop_dataframe = edge_prop_dataframe
        if vertex_prop_eval_dict is None:
            vertex_prop_eval_dict = {}
            # Rebuild the eval dict from the dataframe's columns and index
            if vertex_prop_dataframe is not None:
                self._update_eval_dict(
                    vertex_prop_eval_dict, vertex_prop_dataframe, self.vertex_col_name
                )
        self.__vertex_prop_eval_dict = vertex_prop_eval_dict
        if edge_prop_eval_dict is None:
            edge_prop_eval_dict = {}
            if edge_prop_dataframe is not None:
                self._update_eval_dict(
                    edge_prop_eval_dict, edge_prop_dataframe, self.edge_id_col_name
                )
        self.__edge_prop_eval_dict = edge_prop_eval_dict
        self.__dataframe_type = dataframe_type
        self.__series_type = series_type
        self.__vertex_prop_dtypes = vertex_prop_dtypes
        self.__edge_prop_dtypes = edge_prop_dtypes
        self.__vertex_vector_property_lengths = vertex_vector_property_lengths
        self.__edge_vector_property_lengths = edge_vector_property_lengths
        self.__last_edge_id = last_edge_id
        self.__is_edge_id_autogenerated = is_edge_id_autogenerated
        self.__num_vertices = num_vertices
        self.__vertex_type_value_counts = vertex_type_value_counts
        self.__edge_type_value_counts = edge_type_value_counts
# PropertyGraph read-only attributes
@property
def edges(self):
"""
All the edges in the graph as a DataFrame containing
sources and destinations. It does not return the edge properties.
"""
if self.__edge_prop_dataframe is not None:
return self.__edge_prop_dataframe[
[self.src_col_name, self.dst_col_name]
].reset_index()
return None
@property
def vertex_property_names(self):
"""
Names of all the vertex properties excluding type.
"""
if self.__vertex_prop_dataframe is not None:
props = list(self.__vertex_prop_dataframe.columns)
props.remove(self.type_col_name) # should "type" be removed?
return props
return []
@property
def edge_property_names(self):
"""
List containing each edge property name in the PropertyGraph instance.
"""
if self.__edge_prop_dataframe is not None:
props = list(self.__edge_prop_dataframe.columns)
props.remove(self.src_col_name)
props.remove(self.dst_col_name)
props.remove(self.type_col_name) # should "type" be removed?
if self.weight_col_name in props:
props.remove(self.weight_col_name)
return props
return []
@property
def vertex_types(self):
"""
The set of vertex type names
"""
value_counts = self._vertex_type_value_counts
if value_counts is None:
names = set()
elif self.__series_type is cudf.Series:
names = set(value_counts.index.to_arrow().to_pylist())
else:
names = set(value_counts.index)
default = self._default_type_name
if default not in names and self.get_num_vertices(default) > 0:
# include "" from vertices that only exist in edge data
names.add(default)
return names
@property
def edge_types(self):
"""
Series containing the set of edge type names
"""
value_counts = self._edge_type_value_counts
if value_counts is None:
return set()
elif self.__series_type is cudf.Series:
return set(value_counts.index.to_arrow().to_pylist())
else:
return set(value_counts.index)
# PropertyGraph read-only attributes for debugging
    @property
    def _vertex_prop_dataframe(self):
        """
        Dataframe containing the vertex properties.
        """
        return self.__vertex_prop_dataframe
    @property
    def _edge_prop_dataframe(self):
        """
        Dataframe containing the edge properties.
        """
        return self.__edge_prop_dataframe
@property
def _vertex_type_value_counts(self):
"""
A Series of the counts of types in __vertex_prop_dataframe
"""
if self.__vertex_prop_dataframe is None:
return
if self.__vertex_type_value_counts is None:
# Types should all be strings; what should we do if we see NaN?
self.__vertex_type_value_counts = self.__vertex_prop_dataframe[
self.type_col_name
].value_counts(sort=False, dropna=False)
return self.__vertex_type_value_counts
@property
def _edge_type_value_counts(self):
"""
Series of the counts of types in __edge_prop_dataframe
"""
if self.__edge_prop_dataframe is None:
return
if self.__edge_type_value_counts is None:
# Types should all be strings; what should we do if we see NaN?
self.__edge_type_value_counts = self.__edge_prop_dataframe[
self.type_col_name
].value_counts(sort=False, dropna=False)
return self.__edge_type_value_counts
    def get_num_vertices(self, type=None, *, include_edge_data=True):
        """
        Return the number of all vertices or vertices of a given type.
        Parameters
        ----------
        type : string, optional
            If type is None (the default), return the total number of vertices,
            otherwise return the number of vertices of the specified type.
        include_edge_data : bool (default True)
            If True, include vertices that were added in vertex and edge data.
            If False, only include vertices that were added in vertex data.
            Note that vertices that only exist in edge data are assumed to have
            the default type.
        Returns
        -------
        int
            The number of vertices in the graph constrained by the type parameter.
        See Also
        --------
        PropertyGraph.get_num_edges
        Examples
        --------
        >>> import cugraph
        >>> import cudf
        >>> from cugraph.experimental import PropertyGraph
        >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
        ...                     data=[(99, 22, "a"),
        ...                           (98, 34, "b"),
        ...                           (97, 56, "c"),
        ...                           (96, 88, "d"),
        ...                           ])
        >>> pG = PropertyGraph()
        >>> pG.add_edge_data(df, vertex_col_names=("src", "dst"))
        >>> pG.get_num_vertices()
        8
        """
        if type is None:
            if not include_edge_data:
                # Only vertices explicitly added via add_vertex_data count
                if self.__vertex_prop_dataframe is None:
                    return 0
                return len(self.__vertex_prop_dataframe)
            # Use the cached total if available; it is invalidated (set to
            # None) whenever vertex or edge data is added.
            if self.__num_vertices is not None:
                return self.__num_vertices
            self.__num_vertices = 0
            # Unique vertex IDs across vertex data plus edge srcs/dsts
            vert_sers = self.__get_all_vertices_series()
            if vert_sers:
                if self.__series_type is cudf.Series:
                    self.__num_vertices = cudf.concat(
                        vert_sers, ignore_index=True
                    ).nunique()
                else:
                    self.__num_vertices = pd.concat(
                        vert_sers, ignore_index=True
                    ).nunique()
            return self.__num_vertices
        value_counts = self._vertex_type_value_counts
        if type == self._default_type_name and include_edge_data:
            # The default type, "", can refer to both vertex and edge data
            if self.__vertex_prop_dataframe is None:
                return self.get_num_vertices()
            # Total minus explicitly-typed vertices, plus those explicitly
            # added with the default type
            return (
                self.get_num_vertices()
                - len(self.__vertex_prop_dataframe)
                + (value_counts[type] if type in value_counts else 0)
            )
        if self.__vertex_prop_dataframe is None:
            return 0
        return value_counts[type] if type in value_counts else 0
def get_num_edges(self, type=None):
"""
Return the number of all edges or edges of a given type.
Parameters
----------
type : string, optional
Edge type or None, if None then all edges are counted
Returns
-------
int
If type is None (the default), returns the total number of edges,
otherwise return the number of edges of the specified type.
See Also
--------
PropertyGraph.get_num_vertices
Examples
--------
>>> import cugraph
>>> import cudf
>>> from cugraph.experimental import PropertyGraph
>>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
... data=[(99, 22, "a"),
... (98, 34, "b"),
... (97, 56, "c"),
... (96, 88, "d"),
... ])
>>> pG = PropertyGraph()
>>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
>>> pG.get_num_edges()
4
"""
if type is None:
if self.__edge_prop_dataframe is not None:
return len(self.__edge_prop_dataframe)
else:
return 0
if self.__edge_prop_dataframe is None:
return 0
value_counts = self._edge_type_value_counts
return value_counts[type] if type in value_counts else 0
    def get_vertices(self, selection=None):
        """
        Return a Series containing the unique vertex IDs contained in both
        the vertex and edge property data in ascending order.
        Selection is not yet supported.
        Parameters
        ----------
        selection : PropertySelection, optional
            A PropertySelection returned from one or more calls to
            select_vertices() and/or select_edges()
        Returns
        -------
        cudf series or pandas series, optional
            Contains vertices that match the selection or all
        Examples
        --------
        >>> import cugraph
        >>> import cudf
        >>> from cugraph.experimental import PropertyGraph
        >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
        ...                     data=[(99, 22, "a"),
        ...                           (98, 34, "b"),
        ...                           (97, 56, "c"),
        ...                           (96, 88, "d"),
        ...                           ])
        >>> pG = PropertyGraph()
        >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
        >>> pG.get_vertices()
        0    22
        1    34
        2    56
        3    88
        4    96
        5    97
        6    98
        7    99
        dtype: int64
        """
        # Vertex IDs from vertex data plus edge srcs/dsts
        vert_sers = self.__get_all_vertices_series()
        if vert_sers:
            if self.__series_type is cudf.Series:
                return self.__series_type(
                    cudf.concat(vert_sers, ignore_index=True).unique().sort_values()
                )
            else:
                # pandas: unique() returns an ndarray, so wrap before sorting
                x = pd.Series(pd.concat(vert_sers, ignore_index=True).unique())
                return self.__series_type(x.sort_values())
        # NOTE(review): if no data has ever been added, __series_type is None
        # and this call would raise TypeError — confirm whether an empty
        # cudf/pandas Series should be returned instead.
        return self.__series_type()
    def vertices_ids(self):
        """
        Alias for get_vertices()
        Returns
        -------
        cudf Series or pandas Series
            Series containing the unique vertex IDs in both the
            vertex and edge property data. Return type is based
            on if the PropertyGraph instance was created/updated
            using cudf or pandas DataFrames.
        See Also
        --------
        PropertyGraph.get_vertices
        """
        return self.get_vertices()
def vertex_types_from_numerals(
self, nums: Union[cudf.Series, pd.Series]
) -> Union[cudf.Series, pd.Series]:
"""
Returns the string vertex type names given the numeric category labels.
Parameters
----------
nums: Union[cudf.Series, pandas.Series] (Required)
The list of numeric category labels to convert.
Returns
-------
Union[cudf.Series, pd.Series]
The string type names converted from the input numerals.
"""
return self.__vertex_prop_dataframe[self.type_col_name].dtype.categories[nums]
def edge_types_from_numerals(
self, nums: Union[cudf.Series, pd.Series]
) -> Union[cudf.Series, pd.Series]:
"""
Returns the string edge type names given the numeric category labels.
Parameters
----------
nums: Union[cudf.Series, pandas.Series] (Required)
The list of numeric category labels to convert.
Returns
-------
Union[cudf.Series, pd.Series]
The string type names converted from the input numerals.
"""
return self.__edge_prop_dataframe[self.type_col_name].dtype.categories[nums]
    def add_vertex_data(
        self,
        dataframe,
        vertex_col_name,
        type_name=None,
        property_columns=None,
        vector_properties=None,
        vector_property=None,
    ):
        """
        Add a dataframe describing vertex properties to the PropertyGraph.
        Can contain additional vertices that will not have associated edges.
        Parameters
        ----------
        dataframe : DataFrame-compatible instance
            A DataFrame instance with a compatible Pandas-like DataFrame
            interface.
        vertex_col_name : string
            The column name that contains the values to be used as vertex IDs,
            or the name of the index if the index is vertex IDs.
            Specifying the index may be more efficient.
        type_name : string, optional
            The name to be assigned to the type of property being added. For
            example, if dataframe contains data about users, type_name might be
            "users". If not specified, the type of properties will be added as
            the empty string, "".
        property_columns : list of strings, optional
            List of column names in dataframe to be added as properties. All
            other columns in the dataframe will be ignored. If not specified, all
            columns in dataframe are added.
        vector_properties : dict of string to list of strings, optional
            A dict of vector properties to create from columns in the dataframe.
            Each vector property stores an array for each vertex.
            The dict keys are the new vector property names, and the dict values
            should be Python lists of column names from which to create the vector
            property. Columns used to create vector properties won't be added to
            the property graph by default, but may be included as properties by
            including them in the property_columns argument.
            Use ``PropertyGraph.vertex_vector_property_to_array`` to convert a
            vertex vector property to an array.
        vector_property : string, optional
            If provided, all columns not included in other arguments will be used
            to create a vector property with the given name. This is often used
            for convenience instead of ``vector_properties`` when all input
            properties should be converted to a vector property.
        Returns
        -------
        None
        Examples
        --------
        >>> import cugraph
        >>> import cudf
        >>> from cugraph.experimental import PropertyGraph
        >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
        ...                     data=[(99, 22, "a"),
        ...                           (98, 34, "b"),
        ...                           (97, 56, "c"),
        ...                           (96, 88, "d"),
        ...                           ])
        >>> pG = PropertyGraph()
        >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
        >>> vert_df = cudf.DataFrame({"vert_id": [99, 22, 98, 34, 97, 56, 96, 88],
        ...                           "v_prop": [1, 2, 3, 4, 5, 6, 7, 8]})
        >>> pG.add_vertex_data(vert_df, type_name="vtype", vertex_col_name="vert_id")
        >>> pG.get_vertex_data().sort_index(axis=1)
          _TYPE_  _VERTEX_  v_prop
        0  vtype        99       1
        1  vtype        22       2
        2  vtype        98       3
        3  vtype        34       4
        4  vtype        97       5
        5  vtype        56       6
        6  vtype        96       7
        7  vtype        88       8
        """
        if type(dataframe) not in _dataframe_types:
            raise TypeError(
                "dataframe must be one of the following types: "
                f"{_dataframe_types}, got: {type(dataframe)}"
            )
        # Vertex IDs may come from a regular column or from the index
        if vertex_col_name not in dataframe.columns:
            if vertex_col_name != dataframe.index.name:
                raise ValueError(
                    f"{vertex_col_name} is not a column in "
                    f"dataframe: {dataframe.columns}"
                )
            index_is_set = True
        else:
            index_is_set = False
        if type_name is not None and not isinstance(type_name, str):
            raise TypeError(f"type_name must be a string, got: {type(type_name)}")
        if type_name is None:
            type_name = self._default_type_name
        if property_columns:
            if type(property_columns) is not list:
                raise TypeError(
                    f"property_columns must be a list, got: {type(property_columns)}"
                )
            invalid_columns = set(property_columns).difference(dataframe.columns)
            if invalid_columns:
                raise ValueError(
                    "property_columns contains column(s) not found in dataframe: "
                    f"{list(invalid_columns)}"
                )
            # A scalar property may not reuse the name of an existing vector
            # property
            existing_vectors = (
                set(property_columns) & self.__vertex_vector_property_lengths.keys()
            )
            if existing_vectors:
                raise ValueError(
                    "Non-vector property columns cannot be added to existing "
                    f"vector properties: {', '.join(sorted(existing_vectors))}"
                )
        # Save the DataFrame and Series types for future instantiations
        if self.__dataframe_type is None or self.__series_type is None:
            self.__dataframe_type = type(dataframe)
            self.__series_type = type(dataframe[dataframe.columns[0]])
        else:
            if type(dataframe) is not self.__dataframe_type:
                raise TypeError(
                    f"dataframe is type {type(dataframe)} but "
                    "the PropertyGraph was already initialized "
                    f"using type {self.__dataframe_type}"
                )
        TCN = self.type_col_name
        if vector_properties is not None:
            invalid_keys = {self.vertex_col_name, TCN}
            if property_columns:
                invalid_keys.update(property_columns)
            self._check_vector_properties(
                dataframe,
                vector_properties,
                self.__vertex_vector_property_lengths,
                invalid_keys,
            )
        if vector_property is not None:
            invalid_keys = {self.vertex_col_name, TCN, vertex_col_name}
            if property_columns:
                invalid_keys.update(property_columns)
            if vector_properties:
                invalid_keys.update(*vector_properties.values())
            # All remaining columns become components of the new vector
            d = {
                vector_property: [
                    col for col in dataframe.columns if col not in invalid_keys
                ]
            }
            invalid_keys.remove(vertex_col_name)
            self._check_vector_properties(
                dataframe,
                d,
                self.__vertex_vector_property_lengths,
                invalid_keys,
            )
            # Update vector_properties, but don't mutate the original
            if vector_properties is not None:
                d.update(vector_properties)
            vector_properties = d
        # Clear the cached values related to the number of vertices since more
        # could be added in this method.
        self.__num_vertices = None
        self.__vertex_type_value_counts = None  # Could update instead
        # Add `type_name` to the TYPE categorical dtype if necessary
        is_first_data = self.__vertex_prop_dataframe is None
        if is_first_data:
            # Initialize the __vertex_prop_dataframe using the same type
            # as the incoming dataframe.
            self.__vertex_prop_dataframe = self.__dataframe_type(
                columns=[self.vertex_col_name, TCN]
            )
            # Initialize the new columns to the same dtype as the appropriate
            # column in the incoming dataframe, since the initial merge may not
            # result in the same dtype. (see
            # https://github.com/rapidsai/cudf/issues/9981)
            if not index_is_set:
                self.__vertex_prop_dataframe = self.__update_dataframe_dtypes(
                    self.__vertex_prop_dataframe,
                    {self.vertex_col_name: dataframe[vertex_col_name].dtype},
                )
            self.__vertex_prop_dataframe.set_index(self.vertex_col_name, inplace=True)
            # Use categorical dtype for the type column
            if self.__series_type is cudf.Series:
                cat_class = cudf.CategoricalDtype
            else:
                cat_class = pd.CategoricalDtype
            cat_dtype = cat_class([type_name], ordered=False)
        else:
            cat_dtype = self.__update_categorical_dtype(
                self.__vertex_prop_dataframe, TCN, type_name
            )
        # NOTE: This copies the incoming DataFrame in order to add the new
        # columns. The copied DataFrame is then merged (another copy) and then
        # deleted when out-of-scope.
        # Ensure that both the predetermined vertex ID column name and vertex
        # type column name are present for proper merging.
        tmp_df = dataframe.copy(deep=True)
        if not index_is_set:
            tmp_df[self.vertex_col_name] = tmp_df[vertex_col_name]
        elif tmp_df.index.name != self.vertex_col_name:
            tmp_df.index = tmp_df.index.rename(self.vertex_col_name)
        # FIXME: handle case of a type_name column already being in tmp_df
        tmp_df[TCN] = self.__series_type(type_name, index=tmp_df.index).astype(
            cat_dtype
        )
        if property_columns:
            # all columns
            column_names_to_drop = set(tmp_df.columns)
            # remove the ones to keep
            column_names_to_drop.difference_update(
                property_columns + [self.vertex_col_name, TCN]
            )
        else:
            column_names_to_drop = {vertex_col_name}
            if index_is_set:
                column_names_to_drop -= {self.vertex_col_name, vertex_col_name}
        if vector_properties:
            # Drop vector property source columns by default
            more_to_drop = set().union(*vector_properties.values())
            if property_columns is not None:
                more_to_drop.difference_update(property_columns)
            column_names_to_drop |= more_to_drop
            column_names_to_drop -= vector_properties.keys()
            self._create_vector_properties(tmp_df, vector_properties)
        tmp_df.drop(labels=column_names_to_drop, axis=1, inplace=True)
        # Save the original dtypes for each new column so they can be restored
        # prior to constructing subgraphs (since column dtypes may get altered
        # during merge to accommodate NaN values).
        if is_first_data:
            new_col_info = tmp_df.dtypes.items()
        else:
            new_col_info = self.__get_new_column_dtypes(
                tmp_df, self.__vertex_prop_dataframe
            )
        self.__vertex_prop_dtypes.update(new_col_info)
        if not index_is_set:
            tmp_df.set_index(self.vertex_col_name, inplace=True)
        tmp_df = self.__update_dataframe_dtypes(tmp_df, self.__vertex_prop_dtypes)
        if is_first_data:
            self.__vertex_prop_dataframe = tmp_df
        else:
            # Join on vertex ids (the index)
            # TODO: can we automagically determine when we to use concat?
            df = self.__vertex_prop_dataframe.join(tmp_df, how="outer", rsuffix="_NEW_")
            cols = self.__vertex_prop_dataframe.columns.intersection(
                tmp_df.columns
            ).to_list()
            rename_cols = {f"{col}_NEW_": col for col in cols}
            new_cols = list(rename_cols)
            sub_df = df[new_cols].rename(columns=rename_cols)
            df.drop(columns=new_cols, inplace=True)
            # This only adds data--it doesn't replace existing data
            df.fillna(sub_df, inplace=True)
            self.__vertex_prop_dataframe = df
        # Update the vertex eval dict with the latest column instances
        self._update_eval_dict(
            self.__vertex_prop_eval_dict,
            self.__vertex_prop_dataframe,
            self.vertex_col_name,
        )
def _update_eval_dict(self, eval_dict, df, index_name):
# Update the vertex eval dict with the latest column instances
if self.__series_type is cudf.Series:
latest = {n: df[n] for n in df.columns}
else:
latest = df.to_dict("series")
eval_dict.update(latest)
eval_dict[index_name] = df.index
def get_vertex_data(self, vertex_ids=None, types=None, columns=None):
"""
Gets a DataFrame containing vertex properties
Parameters
----------
vertex_ids : one or a collection of integers, optional
single, list, slice, pandas array, or series of integers which
are the vertices to include in the returned dataframe
types : str or collection of str, optional
types of the vertices to include in the returned data.
Default is to return all vertex types.
columns : str or list of str, optional
property or properties to include in returned data.
Default includes all properties.
Returns
-------
DataFrame
containing vertex properties for only the specified
vertex_ids, columns, and/or types, or all vertex IDs if not specified.
Examples
--------
>>> import cugraph
>>> import cudf
>>> from cugraph.experimental import PropertyGraph
>>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
... data=[(99, 22, "a"),
... (98, 34, "b"),
... (97, 56, "c"),
... (96, 88, "d"),
... ])
>>> pG = PropertyGraph()
>>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
>>> vert_df = cudf.DataFrame({"vert_id": [99, 22, 98, 34, 97, 56, 96, 88],
... "v_prop": [1, 2, 3, 4, 5, 6, 7, 8]})
>>> pG.add_vertex_data(vert_df, type_name="vtype", vertex_col_name="vert_id")
>>> pG.get_vertex_data().sort_index(axis=1)
_TYPE_ _VERTEX_ v_prop
0 vtype 99 1
1 vtype 22 2
2 vtype 98 3
3 vtype 34 4
4 vtype 97 5
5 vtype 56 6
6 vtype 96 7
7 vtype 88 8
"""
if self.__vertex_prop_dataframe is not None:
df = self.__vertex_prop_dataframe
if vertex_ids is not None:
if isinstance(vertex_ids, int):
vertex_ids = [vertex_ids]
try:
df = df.loc[vertex_ids]
except TypeError:
raise TypeError(
"vertex_ids needs to be a list-like type "
f"compatible with DataFrame.loc[], got {type(vertex_ids)}"
)
if types is not None:
if isinstance(types, str):
df_mask = df[self.type_col_name] == types
else:
df_mask = df[self.type_col_name].isin(types)
df = df.loc[df_mask]
# The "internal" pG.vertex_col_name and pG.type_col_name columns
# are also included/added since they are assumed to be needed by
# the caller.
if columns is not None:
# FIXME: invalid columns will result in a KeyError, should a
# check be done here and a more PG-specific error raised?
df = df[[self.type_col_name] + columns]
# Should not drop to ensure vertex ids are returned as a column.
df_out = df.reset_index(drop=False)
# Preserve the dtype (vertex id type) to avoid cugraph algorithms
# throwing errors due to a dtype mismatch
index_dtype = self.__vertex_prop_dataframe.index.dtype
df_out.index = df_out.index.astype(index_dtype)
return df_out
return None
def add_edge_data(
self,
dataframe,
vertex_col_names,
edge_id_col_name=None,
type_name=None,
property_columns=None,
vector_properties=None,
vector_property=None,
):
"""
Add a dataframe describing edge properties to the PropertyGraph.
Columns not specified as vertex columns are considered properties.
Parameters
----------
dataframe : DataFrame-compatible instance
A DataFrame instance with a compatible Pandas-like DataFrame
interface.
vertex_col_names : list of strings
The column names that contain the values to be used as the source
and destination vertex IDs for the edges.
edge_id_col_name : string, optional
The column name that contains the values to be used as edge IDs,
or the name of the index if the index is edge IDs.
Specifying the index may be more efficient.
If unspecified, edge IDs will be automatically assigned.
Currently, all edge data must be added with the same method: either
with automatically generated IDs, or from user-provided edge IDs.
type_name : string, optional
The name to be assigned to the type of property being added. For
example, if dataframe contains data about transactions, type_name
might be "transactions". If not specified, the type of properties
will be added as the empty string "".
property_columns : list of strings, optional
List of column names in the dataframe to be added as properties. All
other columns in dataframe will be ignored. If not specified, all
property columns in the dataframe are added.
vector_properties : dict of string to list of strings, optional
A dict of vector properties to create from columns in the dataframe.
Each vector property stores an array for each edge.
The dict keys are the new vector property names, and the dict values
should be Python lists of column names from which to create the vector
property. Columns used to create vector properties won't be added to
the property graph by default, but may be included as properties by
including them in the property_columns argument.
Use ``PropertyGraph.edge_vector_property_to_array`` to convert an
edge vector property to an array.
vector_property : string, optional
If provided, all columns not included in other arguments will be used
to create a vector property with the given name. This is often used
for convenience instead of ``vector_properties`` when all input
properties should be converted to a vector property.
Returns
-------
None
Examples
--------
>>> import cugraph
>>> import cudf
>>> from cugraph.experimental import PropertyGraph
>>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
... data=[(99, 22, "a"),
... (98, 34, "b"),
... (97, 56, "c"),
... (96, 88, "d"),
... ])
>>> pG = PropertyGraph()
>>> pG.add_edge_data(df, vertex_col_names=("src", "dst"))
>>> pG.get_num_vertices()
8
"""
if type(dataframe) not in _dataframe_types:
raise TypeError(
"dataframe must be one of the following types: "
f"{_dataframe_types}, got: {type(dataframe)}"
)
if type(vertex_col_names) not in [list, tuple]:
raise TypeError(
"vertex_col_names must be a list or tuple, got: "
f"{type(vertex_col_names)}"
)
if edge_id_col_name is not None:
if not isinstance(edge_id_col_name, str):
raise TypeError(
"edge_id_col_name must be a string, got: "
f"{type(edge_id_col_name)}"
)
if edge_id_col_name not in dataframe.columns:
if edge_id_col_name != dataframe.index.name:
raise ValueError(
"edge_id_col_name argument not in columns, "
f"got {edge_id_col_name!r}"
)
index_is_set = True
else:
index_is_set = False
invalid_columns = set(vertex_col_names).difference(dataframe.columns)
if invalid_columns:
raise ValueError(
"vertex_col_names contains column(s) not found "
f"in dataframe: {list(invalid_columns)}"
)
if type_name is not None and not isinstance(type_name, str):
raise TypeError(f"type_name must be a string, got: {type(type_name)}")
if type_name is None:
type_name = self._default_type_name
if property_columns:
if type(property_columns) is not list:
raise TypeError(
f"property_columns must be a list, got: {type(property_columns)}"
)
invalid_columns = set(property_columns).difference(dataframe.columns)
if invalid_columns:
raise ValueError(
"property_columns contains column(s) not found in dataframe: "
f"{list(invalid_columns)}"
)
existing_vectors = (
set(property_columns) & self.__vertex_vector_property_lengths.keys()
)
if existing_vectors:
raise ValueError(
"Non-vector property columns cannot be added to existing "
f"vector properties: {', '.join(sorted(existing_vectors))}"
)
# Save the DataFrame and Series types for future instantiations
if self.__dataframe_type is None or self.__series_type is None:
self.__dataframe_type = type(dataframe)
self.__series_type = type(dataframe[dataframe.columns[0]])
else:
if type(dataframe) is not self.__dataframe_type:
raise TypeError(
f"dataframe is type {type(dataframe)} but "
"the PropertyGraph was already initialized "
f"using type {self.__dataframe_type}"
)
if self.__is_edge_id_autogenerated is False and edge_id_col_name is None:
raise NotImplementedError(
"Unable to automatically generate edge IDs. "
"`edge_id_col_name` must be specified if edge data has been "
"previously added with edge_id_col_name."
)
if self.__is_edge_id_autogenerated is True and edge_id_col_name is not None:
raise NotImplementedError(
"Invalid use of `edge_id_col_name`. Edge data has already "
"been added with automatically generated IDs, so now all "
"edge data must be added using automatically generated IDs."
)
TCN = self.type_col_name
if vector_properties is not None:
invalid_keys = {self.src_col_name, self.dst_col_name, TCN}
if property_columns:
invalid_keys.update(property_columns)
self._check_vector_properties(
dataframe,
vector_properties,
self.__edge_vector_property_lengths,
invalid_keys,
)
if vector_property is not None:
invalid_keys = {
self.src_col_name,
self.dst_col_name,
TCN,
vertex_col_names[0],
vertex_col_names[1],
}
if property_columns:
invalid_keys.update(property_columns)
if vector_properties:
invalid_keys.update(*vector_properties.values())
d = {
vector_property: [
col for col in dataframe.columns if col not in invalid_keys
]
}
invalid_keys.difference_update(vertex_col_names)
self._check_vector_properties(
dataframe,
d,
self.__edge_vector_property_lengths,
invalid_keys,
)
# Update vector_properties, but don't mutate the original
if vector_properties is not None:
d.update(vector_properties)
vector_properties = d
# Clear the cached value for num_vertices since more could be added in
# this method. This method cannot affect __node_type_value_counts
self.__num_vertices = None
self.__edge_type_value_counts = None # Could update instead
# Add `type_name` to the categorical dtype if necessary
is_first_data = self.__edge_prop_dataframe is None
if is_first_data:
self.__edge_prop_dataframe = self.__dataframe_type(
columns=[self.src_col_name, self.dst_col_name, TCN]
)
# Initialize the new columns to the same dtype as the appropriate
# column in the incoming dataframe, since the initial merge may not
# result in the same dtype. (see
# https://github.com/rapidsai/cudf/issues/9981)
self.__edge_prop_dataframe = self.__update_dataframe_dtypes(
self.__edge_prop_dataframe,
{
self.src_col_name: dataframe[vertex_col_names[0]].dtype,
self.dst_col_name: dataframe[vertex_col_names[1]].dtype,
},
)
self.__edge_prop_dataframe.index.name = self.edge_id_col_name
# Use categorical dtype for the type column
if self.__series_type is cudf.Series:
cat_class = cudf.CategoricalDtype
else:
cat_class = pd.CategoricalDtype
cat_dtype = cat_class([type_name], ordered=False)
self.__is_edge_id_autogenerated = edge_id_col_name is None
else:
cat_dtype = self.__update_categorical_dtype(
self.__edge_prop_dataframe, TCN, type_name
)
# NOTE: This copies the incoming DataFrame in order to add the new
# columns. The copied DataFrame is then merged (another copy) and then
# deleted when out-of-scope.
tmp_df = dataframe.copy(deep=True)
tmp_df[self.src_col_name] = tmp_df[vertex_col_names[0]]
tmp_df[self.dst_col_name] = tmp_df[vertex_col_names[1]]
tmp_df[TCN] = self.__series_type(type_name, index=tmp_df.index).astype(
cat_dtype
)
# Add unique edge IDs to the new rows. This is just a count for each
# row starting from the last edge ID value, with initial edge ID 0.
if edge_id_col_name is None:
start_eid = 0 if self.__last_edge_id is None else self.__last_edge_id
end_eid = start_eid + len(tmp_df) # exclusive
if self.__series_type is cudf.Series:
index_class = cudf.RangeIndex
else:
index_class = pd.RangeIndex
tmp_df.index = index_class(start_eid, end_eid, name=self.edge_id_col_name)
self.__last_edge_id = end_eid
else:
if not index_is_set:
tmp_df.set_index(edge_id_col_name, inplace=True)
tmp_df.index.name = self.edge_id_col_name
if property_columns:
# all columns
column_names_to_drop = set(tmp_df.columns)
# remove the ones to keep
column_names_to_drop.difference_update(
property_columns + [self.src_col_name, self.dst_col_name, TCN]
)
else:
column_names_to_drop = {vertex_col_names[0], vertex_col_names[1]}
if vector_properties:
# Drop vector property source columns by default
more_to_drop = set().union(*vector_properties.values())
if property_columns is not None:
more_to_drop.difference_update(property_columns)
column_names_to_drop |= more_to_drop
column_names_to_drop -= vector_properties.keys()
self._create_vector_properties(tmp_df, vector_properties)
tmp_df.drop(labels=column_names_to_drop, axis=1, inplace=True)
# Save the original dtypes for each new column so they can be restored
# prior to constructing subgraphs (since column dtypes may get altered
# during merge to accommodate NaN values).
if is_first_data:
new_col_info = tmp_df.dtypes.items()
else:
new_col_info = self.__get_new_column_dtypes(
tmp_df, self.__edge_prop_dataframe
)
self.__edge_prop_dtypes.update(new_col_info)
tmp_df = self.__update_dataframe_dtypes(tmp_df, self.__edge_prop_dtypes)
if is_first_data:
self.__edge_prop_dataframe = tmp_df
else:
# Join on edge ids (the index)
# TODO: can we automagically determine when we to use concat?
df = self.__edge_prop_dataframe.join(tmp_df, how="outer", rsuffix="_NEW_")
cols = self.__edge_prop_dataframe.columns.intersection(
tmp_df.columns
).to_list()
rename_cols = {f"{col}_NEW_": col for col in cols}
new_cols = list(rename_cols)
sub_df = df[new_cols].rename(columns=rename_cols)
df.drop(columns=new_cols, inplace=True)
# This only adds data--it doesn't replace existing data
df.fillna(sub_df, inplace=True)
self.__edge_prop_dataframe = df
# Update the edge eval dict with the latest column instances
self._update_eval_dict(
self.__edge_prop_eval_dict,
self.__edge_prop_dataframe,
self.edge_id_col_name,
)
def get_edge_data(self, edge_ids=None, types=None, columns=None):
    """
    Return a dataframe containing edge properties for only the specified
    edge_ids, columns, and/or edge type, or all edge IDs if not specified.

    Parameters
    ----------
    edge_ids : int or collection of int, optional
        The list of edges to include in the edge data
    types : list, optional
        List of edge types to include in returned dataframe.
        None is the default and will return all edge types.
    columns : which edge columns will be returned, optional
        None is the default and will result in all columns being returned

    Returns
    -------
    Dataframe
        Containing edge ids, type edge source, destination
        and all the columns specified in the columns parameter

    Examples
    --------
    >>> import cudf
    >>> import cugraph
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> pG.get_edge_data(types="etype").sort_index(axis=1)
       _DST_  _EDGE_ID_  _SRC_ _TYPE_ some_property
    0     22          0     99  etype             a
    1     34          1     98  etype             b
    2     56          2     97  etype             c
    3     88          3     96  etype             d
    """
    if self.__edge_prop_dataframe is not None:
        df = self.__edge_prop_dataframe
        if edge_ids is not None:
            # Accept a single scalar edge ID for convenience; .loc[] below
            # needs a list-like.
            if isinstance(edge_ids, int):
                edge_ids = [edge_ids]
            try:
                df = df.loc[edge_ids]
            except TypeError:
                raise TypeError(
                    "edge_ids needs to be a list-like type "
                    f"compatible with DataFrame.loc[], got {type(edge_ids)}"
                )

        if types is not None:
            # A single type string is matched with ==; a collection of
            # types is matched with isin().
            if isinstance(types, str):
                df_mask = df[self.type_col_name] == types
            else:
                df_mask = df[self.type_col_name].isin(types)
            df = df.loc[df_mask]

        # The "internal" src, dst, edge_id, and type columns are also
        # included/added since they are assumed to be needed by the caller.
        if columns is None:
            # remove the "internal" weight column if one was added
            all_columns = list(self.__edge_prop_dataframe.columns)
            if self.weight_col_name in all_columns:
                all_columns.remove(self.weight_col_name)
            df = df[all_columns]
        else:
            # FIXME: invalid columns will result in a KeyError, should a
            # check be done here and a more PG-specific error raised?
            df = df[
                [self.src_col_name, self.dst_col_name, self.type_col_name] + columns
            ]

        # Should not drop so the edge ids are returned as a column.
        df_out = df.reset_index()

        # Preserve the dtype (edge id type) to avoid cugraph algorithms
        # throwing errors due to a dtype mismatch
        index_dtype = self.__edge_prop_dataframe.index.dtype
        df_out.index = df_out.index.astype(index_dtype)

        return df_out

    # No edge data has been added to this PropertyGraph.
    return None
def fillna_vertices(self, val=0):
    """
    Replace missing (NA) vertex property values in place.

    Parameters
    ----------
    val : object, Series, or dict, optional (default 0)
        Replacement value. If a dict or Series is given, its keys or
        index name the columns to fill and its values are the per-column
        fill values.
    """
    # Mutates the vertex property frame directly; no new frame is created.
    target = self.__vertex_prop_dataframe
    target.fillna(val, inplace=True)
def fillna_edges(self, val=0):
    """
    Replace missing (NA) edge property values in place.

    Parameters
    ----------
    val : object, Series, or dict, optional (default 0)
        Replacement value. If a dict or Series is given, its keys or
        index name the columns to fill and its values are the per-column
        fill values.
    """
    # Mutates the edge property frame directly; no new frame is created.
    target = self.__edge_prop_dataframe
    target.fillna(val, inplace=True)
def select_vertices(self, expr, from_previous_selection=None):
    """
    Evaluate expr and return a PropertySelection object representing the
    vertices that match the expression.

    Parameters
    ----------
    expr : string
        A python expression using property names and operators to select
        specific vertices.
    from_previous_selection : PropertySelection, optional
        A PropertySelection instance returned from a prior call to
        select_vertices() that can be used to select a subset of vertices
        to evaluate the expression against. This allows for a selection of
        the intersection of vertices of multiple types (eg. all vertices
        that are both type A and type B)

    Returns
    -------
    PropertySelection
        used for calls to extract_subgraph()
        in order to construct a Graph containing only specific vertices.

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> vert_df = cudf.DataFrame({"vert_id": [99, 22, 98, 34, 97, 56, 96, 88],
    ...                           "v_prop": [1, 2, 3, 4, 5, 6, 7, 8]})
    >>> pG.add_vertex_data(vert_df, type_name="vtype", vertex_col_name="vert_id")
    >>> selection = pG.select_vertices("(_TYPE_ == 'vtype') & (v_prop > 4)")
    >>> G = pG.extract_subgraph(selection=selection)
    >>> print (G.number_of_vertices())
    4
    """
    # FIXME: check types

    # Check if the expr is to be evaluated in the context of properties
    # from only the previously selected vertices (as opposed to all
    # properties from all vertices)
    if (
        from_previous_selection is not None
        and from_previous_selection.vertex_selections is not None
    ):
        previously_selected_rows = self.__vertex_prop_dataframe[
            from_previous_selection.vertex_selections
        ]
        rows_to_eval = self.__vertex_prop_dataframe.loc[
            previously_selected_rows.index
        ]
        # Expose each property column (and the vertex IDs) to the
        # expression as local names.
        locals = dict([(n, rows_to_eval[n]) for n in rows_to_eval.columns])
        locals[self.vertex_col_name] = rows_to_eval.index
    else:
        locals = self.__vertex_prop_eval_dict
    globals = {}
    # NOTE: eval() executes arbitrary code; expr must come from a
    # trusted caller.
    selected_col = eval(expr, globals, locals)

    num_rows = len(self.__vertex_prop_dataframe)
    # Ensure the column is the same size as the DataFrame, then replace any
    # NA values with False to represent rows that should not be selected.
    # This ensures the selected column can be applied to the entire
    # __vertex_prop_dataframe to determine which rows to use when creating
    # a Graph from a query.
    if num_rows != len(selected_col):
        selected_col = selected_col.reindex(
            self.__vertex_prop_dataframe.index, fill_value=False, copy=False
        )
    return EXPERIMENTAL__PropertySelection(vertex_selection_series=selected_col)
def select_edges(self, expr):
    """
    Evaluate a property expression against all edges and return the
    matching set as a PropertySelection.

    Parameters
    ----------
    expr : string
        A python expression referencing edge property names and
        operators; edges for which it evaluates truthy are selected.

    Returns
    -------
    PropertySelection
        Pass to extract_subgraph() to construct a Graph containing only
        the selected edges.

    Examples
    --------
    >>> import cudf
    >>> import cugraph
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> selection = pG.select_edges("(_TYPE_ == 'etype') & (some_property == 'd')")
    >>> G = pG.extract_subgraph(selection=selection,
    ...                         create_using=cugraph.Graph(directed=True),
    ...                         renumber_graph=False)
    >>> print (G.edges())
       src  dst
    0   96   88
    """
    # FIXME: check types
    # The edge property columns are exposed to the expression as local
    # names; the global namespace is deliberately empty.
    # NOTE: eval() executes arbitrary code -- expr must come from a
    # trusted caller.
    eval_locals = self.__edge_prop_eval_dict
    result_series = eval(expr, {}, eval_locals)
    return EXPERIMENTAL__PropertySelection(edge_selection_series=result_series)
def extract_subgraph(
    self,
    create_using=None,
    selection=None,
    edge_weight_property=None,
    default_edge_weight=None,
    check_multi_edges=True,
    renumber_graph=True,
    add_edge_data=True,
):
    """
    Return a subgraph of the overall PropertyGraph containing vertices
    and edges that match a selection.

    Parameters
    ----------
    create_using : type or instance of cugraph.Graph or PropertyGraph, optional
        Creates a Graph to return using the type specified. If an instance
        is specified, the type of the instance is used to construct the
        return Graph, and all relevant attributes set on the instance are
        copied to the return Graph (eg. directed). If not specified the
        returned Graph will be a directed cugraph.MultiGraph instance.
    selection : PropertySelection, optional
        A PropertySelection returned from one or more calls to
        select_vertices() and/or select_edges(), used for creating a Graph
        with only the selected properties. If not specified the returned
        Graph will have all properties. Note, this could result in a Graph
        with multiple edges, which may not be supported based on the value
        of create_using.
    edge_weight_property : string, optional
        The name of the property whose values will be used as weights on
        the returned Graph. If not specified, the returned Graph will be
        unweighted. Ignored for PropertyGraph return type.
    default_edge_weight : float64, optional
        Value that replaces empty weight property fields.
        Ignored for PropertyGraph return type.
    check_multi_edges : bool (default True)
        When True and create_using argument is given and not a MultiGraph,
        this will perform an expensive check to verify that the edges in
        the edge dataframe do not form a multigraph with duplicate edges.
        Ignored for PropertyGraph return type.
    renumber_graph : bool (default True)
        If True, return a Graph that has been renumbered for use by graph
        algorithms. If False, the returned graph will need to be manually
        renumbered prior to calling graph algos.
        Ignored for PropertyGraph return type.
    add_edge_data : bool (default True)
        If True, add meta data about the edges contained in the extracted
        graph which are required for future calls to annotate_dataframe().
        Ignored for PropertyGraph return type.

    Returns
    -------
    A Graph instance of the same type as create_using containing only the
    vertices and edges resulting from applying the selection to the set of
    vertex and edge property data.

    Examples
    --------
    >>> import cudf
    >>> import cugraph
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> vert_df = cudf.DataFrame({"vert_id": [99, 22, 98, 34, 97, 56, 96, 88],
    ...                           "v_prop": [1, 2, 3, 4, 5, 6, 7, 8]})
    >>> pG.add_vertex_data(vert_df, type_name="vtype", vertex_col_name="vert_id")
    >>> selection = pG.select_edges("(_TYPE_ == 'etype') & (some_property == 'd')")
    >>> G = pG.extract_subgraph(selection=selection,
    ...                         create_using=cugraph.Graph(directed=True),
    ...                         renumber_graph=False)
    >>> print (G.edges())
       src  dst
    0   96   88
    """
    if selection is not None and not isinstance(
        selection, EXPERIMENTAL__PropertySelection
    ):
        raise TypeError(
            "selection must be an instance of "
            f"PropertySelection, got {type(selection)}"
        )

    # NOTE: the expressions passed in to extract specific edges and
    # vertices assume the original dtypes in the user input have been
    # preserved. However, merge operations on the DataFrames can change
    # dtypes (eg. int64 to float64 in order to add NaN entries). This
    # should not be a problem since the conversions do not change the
    # values.
    if selection is not None and selection.vertex_selections is not None:
        selected_vertex_dataframe = self.__vertex_prop_dataframe[
            selection.vertex_selections
        ]
    else:
        selected_vertex_dataframe = None

    if selection is not None and selection.edge_selections is not None:
        selected_edge_dataframe = self.__edge_prop_dataframe[
            selection.edge_selections
        ]
    else:
        selected_edge_dataframe = self.__edge_prop_dataframe

    # FIXME: check that self.__edge_prop_dataframe is set!

    # If vertices were specified, select only the edges that contain the
    # selected verts in both src and dst
    if (
        selected_vertex_dataframe is not None
        and not selected_vertex_dataframe.empty
    ):
        has_srcs = selected_edge_dataframe[self.src_col_name].isin(
            selected_vertex_dataframe.index
        )
        has_dsts = selected_edge_dataframe[self.dst_col_name].isin(
            selected_vertex_dataframe.index
        )
        edges = selected_edge_dataframe[has_srcs & has_dsts]
        # Alternative to benchmark
        # edges = selected_edge_dataframe.merge(
        #     selected_vertex_dataframe[[]],
        #     left_on=self.src_col_name,
        #     right_index=True,
        # ).merge(
        #     selected_vertex_dataframe[[]],
        #     left_on=self.dst_col_name,
        #     right_index=True,
        # )
    else:
        edges = selected_edge_dataframe

    # Default create_using set here instead of function signature to
    # prevent cugraph from running on import. This may help diagnose errors
    create_kind = "cugraph"
    if create_using is None:
        create_using = cugraph.MultiGraph(directed=True)
    elif isinstance(create_using, type(self)):
        # PropertyGraph instance given: return a new, empty PG of the
        # same type, populated via _build_from_components() below.
        rv = type(create_using)()
        create_kind = "propertygraph"
    elif type(create_using) is type and issubclass(create_using, type(self)):
        rv = create_using()
        create_kind = "propertygraph"

    if create_kind == "cugraph":
        # The __*_prop_dataframes have likely been merged several times and
        # possibly had their dtypes converted in order to accommodate NaN
        # values. Restore the original dtypes in the resulting edges df prior
        # to creating a Graph.
        edges = self.__update_dataframe_dtypes(edges, self.__edge_prop_dtypes)
        return self.edge_props_to_graph(
            edges,
            create_using=create_using,
            edge_weight_property=edge_weight_property,
            default_edge_weight=default_edge_weight,
            check_multi_edges=check_multi_edges,
            renumber_graph=renumber_graph,
            add_edge_data=add_edge_data,
        )

    # Return a subgraph as PropertyGraph
    if (
        selected_vertex_dataframe is None
        and self.__vertex_prop_dataframe is not None
    ):
        # No vertex selection: carry over all vertex data, plus the cached
        # vertex counts, which remain valid in that case.
        selected_vertex_dataframe = self.__vertex_prop_dataframe.copy()
        num_vertices = self.__num_vertices
        vertex_type_value_counts = self.__vertex_type_value_counts
    else:
        num_vertices = None
        vertex_type_value_counts = None
    if edges is not None and edges is self.__edge_prop_dataframe:
        # Copy so that mutations of the returned PG don't affect this one.
        edges = edges.copy()
        edge_type_value_counts = self.__edge_type_value_counts
    else:
        edge_type_value_counts = None
    rv._build_from_components(
        vertex_prop_dataframe=selected_vertex_dataframe,
        edge_prop_dataframe=edges,
        dataframe_type=self.__dataframe_type,
        series_type=self.__series_type,
        vertex_prop_dtypes=dict(self.__vertex_prop_dtypes),
        edge_prop_dtypes=dict(self.__edge_prop_dtypes),
        vertex_vector_property_lengths=dict(self.__vertex_vector_property_lengths),
        edge_vector_property_lengths=dict(self.__edge_vector_property_lengths),
        last_edge_id=self.__last_edge_id,
        is_edge_id_autogenerated=self.__is_edge_id_autogenerated,
        # Cached properties
        num_vertices=num_vertices,
        vertex_type_value_counts=vertex_type_value_counts,
        edge_type_value_counts=edge_type_value_counts,
    )
    return rv
def annotate_dataframe(self, df, G, edge_vertex_col_names):
    """
    Add properties to df that represent the vertices and edges in graph G.

    Parameters
    ----------
    df : cudf.DataFrame or pandas.DataFrame
        A DataFrame containing edges identified by edge_vertex_col_names
        which will have properties for those edges added to it.
    G : cugraph.Graph (or subclass of) instance.
        Graph containing the edges specified in df. The Graph instance must
        have been generated from a prior call to extract_subgraph() in
        order to have the edge meta-data used to look up the correct
        properties.
    edge_vertex_col_names : tuple of strings
        The column names in df that represent the source and destination
        vertices, used for identifying edges.

    Returns
    -------
    A copy of df with additional columns corresponding to properties for
    the edge in the row.

    Examples
    --------
    >>> import cudf
    >>> import cugraph
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> G = pG.extract_subgraph(create_using=cugraph.Graph(directed=True))
    >>> # Represents results of an algorithm run on the graph returning a dataframe
    >>> algo_result = cudf.DataFrame({"from":df.src,
    ...                               "to":df.dst,
    ...                               "result": range(len(df.src))})
    >>> algo_result2 = pG.annotate_dataframe(algo_result,
    ...                                      G,
    ...                                      edge_vertex_col_names=("from", "to"))
    >>> print (algo_result2.sort_index(axis=1))
      _EDGE_ID_ _TYPE_  from  result some_property  to
    0         0  etype    99       0             a  22
    1         1  etype    98       1             b  34
    2         2  etype    97       2             c  56
    3         3  etype    96       3             d  88
    """
    # FIXME: check all args
    # FIXME: also provide the ability to annotate vertex data.
    (src_col_name, dst_col_name) = edge_vertex_col_names

    df_type = type(df)
    if df_type is not self.__dataframe_type:
        raise TypeError(
            f"df type {df_type} does not match DataFrame type "
            f"{self.__dataframe_type} used in PropertyGraph"
        )

    # G.edge_data is set by extract_subgraph() when add_edge_data=True.
    if hasattr(G, "edge_data"):
        edge_info_df = G.edge_data
    else:
        raise AttributeError("Graph G does not have attribute 'edge_data'")

    # Join on shared columns and the indices
    cols = self.__edge_prop_dataframe.columns.intersection(
        edge_info_df.columns
    ).to_list()
    cols.append(self.edge_id_col_name)
    # New result includes only properties from the src/dst edges identified
    # by edge IDs. All other data in df is merged based on src/dst values.
    # NOTE: results from MultiGraph graphs will have to include edge IDs!
    edge_props_df = edge_info_df.merge(
        self.__edge_prop_dataframe, on=cols, how="inner"
    )

    # FIXME: also allow edge ID col to be passed in and renamed.
    # Temporarily rename the caller's src/dst columns to the PG-internal
    # names so the merge keys line up.
    new_df = df.rename(
        columns={src_col_name: self.src_col_name, dst_col_name: self.dst_col_name}
    )
    new_df = new_df.merge(edge_props_df)
    # restore the original src/dst column names
    new_df.rename(
        columns={self.src_col_name: src_col_name, self.dst_col_name: dst_col_name},
        inplace=True,
    )

    # restore the original dtypes
    new_df = self.__update_dataframe_dtypes(new_df, self.__edge_prop_dtypes)
    # Also restore the dtypes of the caller's original columns, which the
    # merge may have widened (eg. to hold NaN).
    for col in df.columns:
        new_df[col] = new_df[col].astype(df.dtypes[col])

    # FIXME: consider removing internal columns (_EDGE_ID_, etc.) and
    # columns from edge types not included in the edges in df.
    return new_df
def edge_props_to_graph(
    self,
    edge_prop_df,
    create_using,
    edge_weight_property=None,
    default_edge_weight=None,
    check_multi_edges=True,
    renumber_graph=True,
    add_edge_data=True,
):
    """
    Create a Graph from the edges in edge_prop_df.

    Parameters
    ----------
    edge_prop_df : cudf.DataFrame or pandas.DataFrame
        conains the edge data with properties
    create_using : cugraph.Graph (or subclass of) instance.
        Attributes of the graph are passed to the returned graph.
    edge_weight_property : string, optional
        Property used to weight the returned graph.
    default_edge_weight : float64, optional
        Value used to replace NA in the specified weight column
    check_multi_edges : bool, optional (default=True)
        Prevent duplicate edges (if not allowed)
    renumber_graph : bool, optional (default=True)
        If True renumber edge Ids to start at 0, otherwise
        maintain the original ids
    add_edge_data bool, optional(default=True)

    Returns
    -------
    A CuGraph or NetworkX Graph
        contains the edges in edge_prop_df

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> G = pG.edge_props_to_graph(pG.edges,
    ...                            create_using=cugraph.Graph(),
    ...                            renumber_graph=False)
    >>> G.edges()
       src  dst
    0   88   96
    1   22   99
    2   56   97
    3   34   98
    """
    # Don't mutate input data, and ensure DataFrame is not a view
    edge_prop_df = edge_prop_df.copy()
    # FIXME: check default_edge_weight is valid
    if edge_weight_property:
        # The weight may come from a regular column or from the index
        # (when the index is named after the weight property).
        if (
            edge_weight_property not in edge_prop_df.columns
            and edge_prop_df.index.name != edge_weight_property
        ):
            raise ValueError(
                "edge_weight_property "
                f'"{edge_weight_property}" was not found in '
                "edge_prop_df"
            )

        # Ensure a valid edge_weight_property can be used for applying
        # weights to the subgraph, and if a default_edge_weight was
        # specified, apply it to all NAs in the weight column.
        # Also allow the type column to be specified as the edge weight
        # property so that uniform_neighbor_sample can be called with
        # the weights interpreted as types.
        if edge_weight_property == self.type_col_name:
            # Encode the categorical type codes as float32 weights.
            prop_col = edge_prop_df[self.type_col_name].cat.codes.astype("float32")
            edge_prop_df["_temp_type_col"] = prop_col
            edge_weight_property = "_temp_type_col"
        elif edge_weight_property in edge_prop_df.columns:
            prop_col = edge_prop_df[edge_weight_property]
        else:
            # Weight property names the index.
            prop_col = edge_prop_df.index.to_series()
        # count() excludes NA while size does not, so a mismatch means NA
        # values are present in the weight column.
        if prop_col.count() != prop_col.size:
            if default_edge_weight is None:
                raise ValueError(
                    f'edge_weight_property "{edge_weight_property}" '
                    "contains NA values in the subgraph and "
                    "default_edge_weight is not set"
                )
            prop_col = prop_col.fillna(default_edge_weight)
            if edge_weight_property in edge_prop_df.columns:
                edge_prop_df[edge_weight_property] = prop_col
            else:
                # NOTE(review): writes the filled weight values back as the
                # index when the weight property was the index -- confirm
                # downstream consumers expect this.
                edge_prop_df.index = prop_col
        edge_attr = edge_weight_property

    # If a default_edge_weight was specified but an edge_weight_property
    # was not, a new edge weight column must be added.
    elif default_edge_weight:
        edge_attr = self.weight_col_name
        edge_prop_df[edge_attr] = default_edge_weight
    else:
        edge_attr = None

    # Set up the new Graph to return
    if isinstance(create_using, cugraph.Graph):
        # FIXME: extract more attrs from the create_using instance
        attrs = {"directed": create_using.is_directed()}
        G = type(create_using)(**attrs)
    elif type(create_using) is type and issubclass(create_using, cugraph.Graph):
        G = create_using()
    else:
        raise TypeError(
            "create_using must be a cugraph.Graph "
            "(or subclass) type or instance, got: "
            f"{type(create_using)}"
        )

    # Prevent duplicate edges (if not allowed) since applying them to
    # non-MultiGraphs would result in ambiguous edge properties.
    if (
        check_multi_edges
        and not G.is_multigraph()
        and self.is_multigraph(edge_prop_df)
    ):
        if create_using:
            if type(create_using) is type:
                t = create_using.__name__
            else:
                t = type(create_using).__name__
            msg = f"'{t}' graph type specified by create_using"
        else:
            msg = "default Graph graph type"
        raise RuntimeError(
            "query resulted in duplicate edges which "
            f"cannot be represented with the {msg}"
        )

    create_args = {
        "source": self.src_col_name,
        "destination": self.dst_col_name,
        "edge_attr": edge_attr,
        "renumber": renumber_graph,
    }
    # reset_index() surfaces the edge IDs as a column for the edgelist.
    if type(edge_prop_df) is cudf.DataFrame:
        G.from_cudf_edgelist(edge_prop_df.reset_index(), **create_args)
    else:
        G.from_pandas_edgelist(edge_prop_df.reset_index(), **create_args)

    if add_edge_data:
        # Set the edge_data on the resulting Graph to a DataFrame
        # containing the edges and the edge ID for each. Edge IDs are
        # needed for future calls to annotate_dataframe() in order to
        # associate edges with their properties, since the PG can contain
        # multiple edges between vertrices with different properties.
        # FIXME: also add vertex_data
        G.edge_data = self.__create_property_lookup_table(edge_prop_df)

    return G
def renumber_vertices_by_type(self, prev_id_column=None):
    """
    Renumber vertex IDs to be contiguous by type.

    Parameters
    ----------
    prev_id_column : str, optional
        Column name to save the vertex ID before renumbering.

    Returns
    -------
    a DataFrame with the start and stop IDs for each vertex type.
    Stop is *inclusive*.

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> vert_df1 = cudf.DataFrame({"vert_id": [99, 22, 98, 34],
    ...                            "v_prop": [1 ,2 ,3, 4]})
    >>> pG.add_vertex_data(vert_df1, type_name="vtype1", vertex_col_name="vert_id")
    >>> vert_df2 = cudf.DataFrame({"vert_id": [97, 56, 96, 88],
    ...                            "v_prop": [ 5, 6, 7, 8]})
    >>> pG.add_vertex_data(vert_df2, type_name="vtype2", vertex_col_name="vert_id")
    >>> pG.renumber_vertices_by_type()
            start  stop
    vtype1      0     3
    vtype2      4     7
    """
    # Check if some vertex IDs exist only in edge data
    TCN = self.type_col_name
    default = self._default_type_name
    if self.__edge_prop_dataframe is not None and self.get_num_vertices(
        default, include_edge_data=True
    ) != self.get_num_vertices(default, include_edge_data=False):
        raise NotImplementedError(
            "Currently unable to renumber vertices when some vertex "
            "IDs only exist in edge data"
        )
    if self.__vertex_prop_dataframe is None:
        return None
    if (
        prev_id_column is not None
        and prev_id_column in self.__vertex_prop_dataframe
    ):
        raise ValueError(
            f"Can't save previous IDs to existing column {prev_id_column!r}"
        )

    # Use categorical dtype for the type column
    if self.__series_type is cudf.Series:
        cat_class = cudf.CategoricalDtype
    else:
        cat_class = pd.CategoricalDtype
    is_cat = isinstance(self.__vertex_prop_dataframe.dtypes[TCN], cat_class)
    if not is_cat:
        # NOTE(review): the categories here are [TCN] -- the type column
        # *name* -- not the actual type values, so any value other than
        # TCN would become NaN after astype(). Confirm this branch is only
        # reached when the column was already made categorical upstream.
        cat_dtype = cat_class([TCN], ordered=False)
        self.__vertex_prop_dataframe[TCN] = self.__vertex_prop_dataframe[
            TCN
        ].astype(cat_dtype)

    index_dtype = self.__vertex_prop_dataframe.index.dtype
    df = self.__vertex_prop_dataframe.reset_index()
    if len(df.dtypes[TCN].categories) > 1 and len(self.vertex_types) > 1:
        # Avoid `sort_values` if we know there is only one type
        # `self.vertex_types` is currently not cheap, b/c it looks at edge df
        df = df.sort_values(by=TCN, ignore_index=True)
    df.index = df.index.astype(index_dtype)
    if self.__edge_prop_dataframe is not None:
        # Remap edge src/dst values from the old vertex IDs to the new,
        # type-contiguous IDs.
        mapper = self.__series_type(df.index, index=df[self.vertex_col_name])
        self.__edge_prop_dataframe[self.src_col_name] = self.__edge_prop_dataframe[
            self.src_col_name
        ].map(mapper)
        self.__edge_prop_dataframe[self.dst_col_name] = self.__edge_prop_dataframe[
            self.dst_col_name
        ].map(mapper)
    if prev_id_column is None:
        df.drop(columns=[self.vertex_col_name], inplace=True)
    else:
        df.rename(columns={self.vertex_col_name: prev_id_column}, inplace=True)
    df.index.name = self.vertex_col_name
    self.__vertex_prop_dataframe = df

    # Derive inclusive [start, stop] ID ranges per type from the sorted,
    # cumulative type counts.
    rv = self._vertex_type_value_counts.sort_index().cumsum().to_frame("stop")
    rv["start"] = rv["stop"].shift(1, fill_value=0)
    rv["stop"] -= 1  # Make inclusive
    return rv[["start", "stop"]]
def renumber_edges_by_type(self, prev_id_column=None):
    """
    Renumber edge IDs to be contiguous by type.

    Parameters
    ----------
    prev_id_column : str, optional
        Column name to save the edge ID before renumbering.

    Returns
    -------
    DataFrame
        with the start and stop IDs for each edge type. Stop is *inclusive*.

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> pG = PropertyGraph()
    >>> df = cudf.DataFrame(columns=["src", "dst", "edge_ids" ,"some_property"],
    ...                     data=[(99, 22, 3, "a"),
    ...                           (98, 34, 5, "b"),
    ...                           (97, 56, 7, "c"),
    ...                           (96, 88, 11, "d"),
    ...                          ])
    >>> df2 = cudf.DataFrame(columns=["src", "dst", "edge_ids" ,"some_property"],
    ...                      data=[(95, 24, 2, "a"),
    ...                            (94, 36, 4, "b"),
    ...                            (93, 88, 8, "d"),
    ...                           ])
    >>> pG.add_edge_data(df,
    ...                  type_name="etype1",
    ...                  vertex_col_names=("src", "dst"),
    ...                  edge_id_col_name="edge_ids")
    >>> pG.add_edge_data(df2,
    ...                  type_name="etype2",
    ...                  vertex_col_names=("src", "dst"),
    ...                  edge_id_col_name="edge_ids")
    >>> pG.renumber_edges_by_type()
            start  stop
    etype1      0     3
    etype2      4     6
    """
    TCN = self.type_col_name
    if self.__edge_prop_dataframe is None:
        return None
    if prev_id_column is not None and prev_id_column in self.__edge_prop_dataframe:
        raise ValueError(
            f"Can't save previous IDs to existing column {prev_id_column!r}"
        )

    # Use categorical dtype for the type column
    if self.__series_type is cudf.Series:
        cat_class = cudf.CategoricalDtype
    else:
        cat_class = pd.CategoricalDtype
    is_cat = isinstance(self.__edge_prop_dataframe.dtypes[TCN], cat_class)
    if not is_cat:
        # NOTE(review): the categories here are [TCN] -- the type column
        # *name* -- not the actual type values, so any value other than
        # TCN would become NaN after astype(). Confirm this branch is only
        # reached when the column was already made categorical upstream.
        cat_dtype = cat_class([TCN], ordered=False)
        self.__edge_prop_dataframe[TCN] = self.__edge_prop_dataframe[TCN].astype(
            cat_dtype
        )

    df = self.__edge_prop_dataframe
    index_dtype = df.index.dtype
    if prev_id_column is None:
        if len(df.dtypes[TCN].categories) > 1 and len(self.edge_types) > 1:
            # Avoid `sort_values` if we know there is only one type
            df = df.sort_values(by=TCN, ignore_index=True)
        else:
            df.reset_index(drop=True, inplace=True)
    else:
        if len(df.dtypes[TCN].categories) > 1 and len(self.edge_types) > 1:
            # Avoid `sort_values` if we know there is only one type
            df = df.sort_values(by=TCN)
        # Preserve the old edge IDs in prev_id_column before building the
        # new contiguous index.
        df.index.name = prev_id_column
        df.reset_index(inplace=True)
    df.index = df.index.astype(index_dtype)
    df.index.name = self.edge_id_col_name
    self.__edge_prop_dataframe = df

    # Derive inclusive [start, stop] ID ranges per type from the sorted,
    # cumulative type counts.
    rv = self._edge_type_value_counts.sort_index().cumsum().to_frame("stop")
    rv["start"] = rv["stop"].shift(1, fill_value=0)
    rv["stop"] -= 1  # Make inclusive
    return rv[["start", "stop"]]
def vertex_vector_property_to_array(
    self, df, col_name, fillvalue=None, *, missing="ignore"
):
    """Convert a known vertex vector property in a DataFrame to an array.

    Parameters
    ----------
    df : cudf.DataFrame or pandas.DataFrame
        Determines the result type: cupy.ndarray for cudf input,
        numpy.ndarray for pandas input.
    col_name : str
        Name of the column to convert. It must be a vector property
        previously created by PropertyGraph.
    fillvalue : scalar or list, optional (default None)
        Value substituted for rows with missing vector data; a list must
        match the vector length exactly. When None, handling of missing
        rows is governed by ``missing``. Leaving this as None is faster
        when every row is expected to have data.
    missing : {"ignore", "error"}
        With "ignore", empty or null rows are skipped, so the output has
        shape [# of non-empty rows] x [vector length]. With "error", a
        RuntimeError is raised if any row is empty. Ignored when
        fillvalue is given.

    Returns
    -------
    cupy.ndarray or numpy.ndarray
    """
    # Look up the registered length for this vector property; an unknown
    # name means the column was not created as a vertex vector property.
    known_lengths = self.__vertex_vector_property_lengths
    try:
        expected_length = known_lengths[col_name]
    except KeyError:
        raise ValueError(
            f"{col_name!r} is not a known vertex vector property"
        ) from None
    return self._get_vector_property(df, col_name, expected_length, fillvalue, missing)
def edge_vector_property_to_array(
    self, df, col_name, fillvalue=None, *, missing="ignore"
):
    """Convert a known edge vector property in a DataFrame to an array.

    Parameters
    ----------
    df : cudf.DataFrame or pandas.DataFrame
        Determines the result type: cupy.ndarray for cudf input,
        numpy.ndarray for pandas input.
    col_name : str
        Name of the column to convert. It must be a vector property
        previously created by PropertyGraph.
    fillvalue : scalar or list, optional (default None)
        Value substituted for rows with missing vector data; a list must
        match the vector length exactly. When None, handling of missing
        rows is governed by ``missing``. Leaving this as None is faster
        when every row is expected to have data.
    missing : {"ignore", "error"}
        With "ignore", empty or null rows are skipped, so the output has
        shape [# of non-empty rows] x [vector length]. With "error", a
        RuntimeError is raised if any row is empty. Ignored when
        fillvalue is given.

    Returns
    -------
    cupy.ndarray or numpy.ndarray
    """
    # Look up the registered length for this vector property; an unknown
    # name means the column was not created as an edge vector property.
    known_lengths = self.__edge_vector_property_lengths
    try:
        expected_length = known_lengths[col_name]
    except KeyError:
        raise ValueError(
            f"{col_name!r} is not a known edge vector property"
        ) from None
    return self._get_vector_property(df, col_name, expected_length, fillvalue, missing)
def _check_vector_properties(
    self, df, vector_properties, vector_property_lengths, invalid_keys
):
    """Validate ``vector_properties`` and record each property's length.

    Parameters
    ----------
    df : DataFrame
        Frame whose columns back the requested vector properties.
    vector_properties : dict
        Maps new vector property name -> list of column names in ``df``.
    vector_property_lengths : dict
        Maps property name -> vector length. Updated in place, but only
        after *all* entries validate, so a failed call leaves it intact.
    invalid_keys : container
        Existing non-vector property names that must not be reused.

    Raises
    ------
    ValueError
        For reused keys, missing columns, empty column lists, or a size
        that conflicts with an existing vector property.
    TypeError
        When a column spec is a bare string instead of a list.
    """
    df_cols = set(df.columns)
    for key, columns in vector_properties.items():
        if key in invalid_keys:
            raise ValueError(
                "Cannot assign new vector property to existing "
                f"non-vector property: {key}"
            )
        if isinstance(columns, str):
            # If df[columns] is a ListDtype column, should we allow it?
            raise TypeError(
                f"vector property columns for {key!r} should be a list; "
                f"got a str ({columns!r})"
            )
        if not df_cols.issuperset(columns):
            missing = ", ".join(set(columns) - df_cols)
            raise ValueError(
                f"Dataframe does not have columns for vector property {key!r}:"
                f"{missing}"
            )
        if not columns:
            # Bug fix: this message previously lacked the f-prefix and
            # printed the literal text "{key!r}".
            raise ValueError(f"Empty vector property columns for {key!r}!")
        if vector_property_lengths.get(key, len(columns)) != len(columns):
            prev_length = vector_property_lengths[key]
            new_length = len(columns)
            raise ValueError(
                f"Wrong size for vector property {key}; got {new_length}, but "
                f"this vector property already exists with size {prev_length}"
            )
    # Record lengths only after everything above passed.
    for key, columns in vector_properties.items():
        vector_property_lengths[key] = len(columns)
@staticmethod
def _create_vector_properties(df, vector_properties):
    """Pack groups of scalar columns of ``df`` into list-valued columns.

    For each ``key -> columns`` entry, the values of ``df[columns]`` are
    combined row-wise into a single new column ``df[key]`` holding the
    vector for that row. ``df`` is modified in place.
    """
    vectors = {}
    for key, columns in vector_properties.items():
        # 2D array of shape (len(df), len(columns)).
        values = df[columns].values
        if isinstance(df, cudf.DataFrame):
            # cudf: build a list-dtype Series directly from the 2D array.
            vectors[key] = create_list_series_from_2d_ar(values, index=df.index)
        else:
            # pandas: store one 1D ndarray per row (object-dtype column).
            vectors[key] = [
                np.squeeze(vec, 0)
                for vec in np.split(
                    np.ascontiguousarray(values, like=values), len(df)
                )
            ]
    # Assign after building everything so a failure above leaves df unchanged.
    for key, vec in vectors.items():
        df[key] = vec
def _get_vector_property(self, df, col_name, length, fillvalue, missing):
    """Shared implementation for {vertex,edge}_vector_property_to_array.

    Converts the list-valued (cudf) or object-dtype (pandas) column
    ``df[col_name]`` into a 2D array with ``length`` columns. Rows with
    missing data are filled with ``fillvalue`` when given; otherwise
    ``missing`` controls whether they are skipped ("ignore") or raise
    ("error").
    """
    if type(df) is not self.__dataframe_type:
        raise TypeError(
            f"Expected type {self.__dataframe_type}; got type {type(df)}"
        )
    if col_name not in df.columns:
        raise ValueError(f"Column name {col_name} is not in the columns of df")
    if missing not in {"error", "ignore"}:
        raise ValueError(
            f'missing keyword must be one of "error" or "ignore"; got {missing!r}'
        )
    if fillvalue is not None:
        # Accept either a full-length list or a scalar to broadcast.
        try:
            fill = list(fillvalue)
        except Exception:
            fill = [fillvalue] * length
        else:
            if len(fill) != length:
                raise ValueError(
                    f"Wrong size of list as fill value; got {len(fill)}, "
                    f"expected {length}"
                )
        s = df[col_name].copy()  # copy b/c we mutate below
    else:
        s = df[col_name]
    if self.__series_type is cudf.Series:
        if df.dtypes[col_name] != "list":
            raise TypeError(
                "Wrong dtype for vector property; expected 'list', "
                f"got {df.dtypes[col_name]}"
            )
        if fillvalue is not None:
            s[s.isnull()] = fill
        # This returns a writable view (i.e., no copies!)
        # NOTE(review): reaches into cudf list-column internals (the leaf
        # child column) — verify against the cudf version in use.
        rv = s._data.columns[0].children[-1].values.reshape(-1, length)
    else:
        if df.dtypes[col_name] != object:
            raise TypeError(
                "Wrong dtype for vector property; expected 'object', "
                f"got {df.dtypes[col_name]}"
            )
        if fillvalue is not None:
            # Wrap the fill vector in a 1-element object array so pandas
            # assigns the whole vector to each null row.
            a = np.empty(1, dtype=object)
            a[0] = np.array(fill)
            s[s.isnull()] = a
        else:
            s = s[s.notnull()]
        rv = np.vstack(s.to_numpy())
    # Fewer output rows than df rows means some rows had no vector data.
    if fillvalue is None and missing == "error" and rv.shape[0] != len(df):
        raise RuntimeError(
            f"Vector property {col_name!r} has empty rows! "
            'Provide a fill value or use `missing="ignore"` to ignore empty rows.'
        )
    return rv
def is_multi_gpu(self):
    """Report whether this graph is distributed across multiple GPUs.

    PropertyGraph is a single-GPU structure, so this always returns
    False.
    """
    return False
@classmethod
def is_multigraph(cls, df):
    """Return True when ``df`` contains parallel edges.

    Parameters
    ----------
    df : dataframe
        Edge data, e.g. the result of ``get_edge_data()``.

    Returns
    -------
    bool
        True if any (source, destination) pair appears more than once.

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> pG = PropertyGraph()
    >>> df = cudf.DataFrame(columns=["src", "dst", "edge_ids", "some_property"],
    ...                     data=[(99, 22, 3, "a"),
    ...                           (98, 34, 5, "b"),
    ...                           (98, 34, 7, "c"),
    ...                           (96, 88, 11, "d"),
    ...                          ])
    >>> pG.add_edge_data(df, type_name="etype",
    ...                  vertex_col_names=("src", "dst"),
    ...                  edge_id_col_name="edge_ids")
    >>> pG.is_multigraph(pG.get_edge_data())
    True
    """
    endpoint_cols = [cls.src_col_name, cls.dst_col_name]
    return cls._has_duplicates(df, endpoint_cols)
@classmethod
def has_duplicate_edges(cls, df, columns=None):
    """Return True when ``df`` has duplicate edges.

    Two edges are duplicates when they agree on source, destination,
    type, and every column listed in ``columns``.

    Parameters
    ----------
    df : dataframe
        Edge data to check for duplicates.
    columns : list of strings, optional
        Extra column names to include in the duplicate test, in
        addition to source, destination and type.

    Returns
    -------
    bool

    Examples
    --------
    >>> import cugraph
    >>> import cudf
    >>> from cugraph.experimental import PropertyGraph
    >>> df = cudf.DataFrame(columns=["src", "dst", "some_property"],
    ...                     data=[(99, 22, "a"),
    ...                           (98, 34, "b"),
    ...                           (97, 56, "c"),
    ...                           (96, 88, "d"),
    ...                          ])
    >>> pG = PropertyGraph()
    >>> pG.add_edge_data(df, type_name="etype", vertex_col_names=("src", "dst"))
    >>> PropertyGraph.has_duplicate_edges(pG.get_edge_data())
    False
    """
    key_columns = [cls.src_col_name, cls.dst_col_name, cls.type_col_name]
    if columns:
        key_columns = key_columns + list(columns)
    return cls._has_duplicates(df, key_columns)
@classmethod
def _has_duplicates(cls, df, cols):
    """Return True when two or more rows of ``df`` agree on ``cols``."""
    if df.empty:
        return False
    deduped = df[cols].drop_duplicates(ignore_index=True)
    # Fewer unique rows than total rows implies at least one duplicate.
    return len(deduped) != len(df)
def __create_property_lookup_table(self, edge_prop_df):
    """Return a DataFrame mapping edge ids to (src, dst) vertex pairs.

    The source and destination columns are taken from edge_prop_df;
    reset_index() materializes edge_prop_df's index (the edge ids) as a
    regular column of the result.
    """
    src = edge_prop_df[self.src_col_name]
    dst = edge_prop_df[self.dst_col_name]
    return self.__dataframe_type(
        {self.src_col_name: src, self.dst_col_name: dst}
    ).reset_index()
def __get_all_vertices_series(self):
    """
    Returns a list of all Series objects that contain vertices from all
    tables.

    The list contains, in order: the vertex property index (if vertex
    data exists), then the edge source and destination columns (if edge
    data exists). Vertices may appear in more than one Series.
    """
    vpd = self.__vertex_prop_dataframe
    epd = self.__edge_prop_dataframe
    vert_sers = []
    if vpd is not None:
        # Vertex ids are the index of the vertex property dataframe.
        vert_sers.append(vpd.index.to_series())
    if epd is not None:
        vert_sers.append(epd[self.src_col_name])
        vert_sers.append(epd[self.dst_col_name])
    return vert_sers
@staticmethod
def __get_new_column_dtypes(from_df, to_df):
    """Return (column name, dtype) pairs for columns present in
    ``from_df`` but absent from ``to_df``."""
    added = set(from_df.columns) - set(to_df.columns)
    return [(name, from_df.dtypes[name]) for name in added]
@staticmethod
def __update_dataframe_dtypes(df, column_dtype_dict):
    """
    Set the dtype for columns in df using the dtypes in column_dtype_dict.
    This also handles converting standard integer dtypes to nullable
    integer dtypes, needed to accommodate NA values in columns.

    Returns df itself when no column needed conversion, otherwise a new
    DataFrame (via df.assign) with the converted columns.
    """
    update_cols = {}
    for (col, dtype) in column_dtype_dict.items():
        if col not in df.columns:
            continue
        # If the DataFrame is Pandas and the dtype is an integer type,
        # ensure a nullable integer array is used by specifying the correct
        # dtype. The alias for these dtypes is simply a capitalized string
        # (eg. "Int64")
        # https://pandas.pydata.org/pandas-docs/stable/user_guide/missing_data.html#integer-dtypes-and-missing-data
        dtype_str = str(dtype)
        if dtype_str in ["int32", "int64"]:
            dtype_str = dtype_str.title()
        if str(df.dtypes[col]) != dtype_str:
            # Assigning to df[col] produces a (false?) warning with Pandas,
            # but assigning to df.loc[:,col] does not update the df in
            # cudf, so do one or the other based on type.
            update_cols[col] = df[col].astype(dtype_str)
    if not update_cols:
        return df
    # Use df.assign to avoid assignment into df in case df is a view:
    # https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html
    # #returning-a-view-versus-a-copy
    # Note that this requires all column names to be strings.
    return df.assign(**update_cols)
def __update_categorical_dtype(self, df, column, val):
    """Ensure ``val`` is a category of ``df[column]``.

    The categorical column is extended in place when needed, and the
    (possibly updated) categorical dtype is returned.
    """
    # Only extend the dtype when the value is not already a category.
    if val not in df.dtypes[column].categories:
        df[column] = df[column].cat.add_categories([val])
    # Re-read the dtype so the returned value reflects any update.
    return df.dtypes[column]
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/replicate_edgelist.py | # Copyright (c) 2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dask_cudf
import cudf
from dask.distributed import wait, default_client
import numpy as np
from pylibcugraph import (
ResourceHandle,
replicate_edgelist as pylibcugraph_replicate_edgelist,
)
from cugraph.dask.common.part_utils import (
get_persisted_df_worker_map,
persist_dask_df_equal_parts_per_worker,
)
import dask
import cupy as cp
import cugraph.dask.comms.comms as Comms
from typing import Union, Tuple
# FIXME: Convert it to a general-purpose util function
def _convert_to_cudf(cp_arrays: Tuple[cp.ndarray], col_names: list) -> cudf.DataFrame:
    """
    Creates a cudf Dataframe from cupy arrays

    ``cp_arrays`` is the tuple returned by pylibcugraph's
    replicate_edgelist: (src, dst, weight, edge_id, edge_type_id, ...);
    the final element is unused here. Optional arrays may be None and
    are then omitted from the result. ``col_names`` supplies the output
    column names in the same order.
    """
    src, dst, wgt, edge_id, edge_type_id, _ = cp_arrays
    gathered_edgelist_df = cudf.DataFrame()
    gathered_edgelist_df[col_names[0]] = src
    gathered_edgelist_df[col_names[1]] = dst
    if wgt is not None:
        gathered_edgelist_df[col_names[2]] = wgt
    if edge_id is not None:
        gathered_edgelist_df[col_names[3]] = edge_id
    if edge_type_id is not None:
        gathered_edgelist_df[col_names[4]] = edge_type_id
    return gathered_edgelist_df
def _call_plc_replicate_edgelist(
    sID: bytes, edgelist_df: cudf.DataFrame, col_names: list
) -> cudf.DataFrame:
    """Replicate one worker's edgelist partition via pylibcugraph.

    NOTE(review): despite the annotation, ``edgelist_df`` appears to
    arrive as a single-element list wrapping the partition (it is
    unwrapped with ``[0]``) — confirm against the submitting caller.
    Optional weight/id/type columns are passed only when ``col_names``
    is long enough to name them.
    """
    edgelist_df = edgelist_df[0]
    cp_arrays = pylibcugraph_replicate_edgelist(
        resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
        src_array=edgelist_df[col_names[0]],
        dst_array=edgelist_df[col_names[1]],
        weight_array=edgelist_df[col_names[2]] if len(col_names) > 2 else None,
        edge_id_array=edgelist_df[col_names[3]] if len(col_names) > 3 else None,
        edge_type_id_array=edgelist_df[col_names[4]] if len(col_names) > 4 else None,
    )
    return _convert_to_cudf(cp_arrays, col_names)
def _call_plc_replicate_dataframe(sID: bytes, df: cudf.DataFrame) -> cudf.DataFrame:
    """Replicate a generic dataframe partition column by column.

    Each integer column is replicated through the ``src_array`` slot of
    pylibcugraph's replicate_edgelist and each float column through the
    ``weight_array`` slot. Columns with any other dtype are silently
    dropped from the result (callers are expected to have validated
    dtypes beforehand).

    NOTE(review): ``df`` appears to arrive wrapped in a single-element
    list (it is unwrapped with ``[0]``) — confirm against the caller.
    """
    df = df[0]
    df_replicated = cudf.DataFrame()
    for col_name in df.columns:
        cp_array = pylibcugraph_replicate_edgelist(
            resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
            src_array=df[col_name]
            if df[col_name].dtype in [np.int32, np.int64]
            else None,
            dst_array=None,
            weight_array=df[col_name]
            if df[col_name].dtype in [np.float32, np.float64]
            else None,
            edge_id_array=None,
            edge_type_id_array=None,
        )
        src, _, wgt, _, _, _ = cp_array
        if src is not None:
            df_replicated[col_name] = src
        elif wgt is not None:
            df_replicated[col_name] = wgt
    return df_replicated
def _call_plc_replicate_series(sID: bytes, series: cudf.Series) -> cudf.Series:
    """Replicate a series partition.

    Integer series go through the ``src_array`` slot and float series
    through the ``weight_array`` slot of pylibcugraph's
    replicate_edgelist; an unsupported dtype yields an empty series.

    NOTE(review): ``series`` appears to arrive wrapped in a
    single-element list (it is unwrapped with ``[0]``) — confirm
    against the caller.
    """
    series = series[0]
    series_replicated = cudf.Series()
    cp_array = pylibcugraph_replicate_edgelist(
        resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
        src_array=series if series.dtype in [np.int32, np.int64] else None,
        dst_array=None,
        weight_array=series if series.dtype in [np.float32, np.float64] else None,
        edge_id_array=None,
        edge_type_id_array=None,
    )
    src, _, wgt, _, _, _ = cp_array
    if src is not None:
        series_replicated = cudf.Series(src)
    elif wgt is not None:
        series_replicated = cudf.Series(wgt)
    return series_replicated
def _mg_call_plc_replicate(
    client: dask.distributed.client.Client,
    sID: bytes,
    dask_object: dict,
    input_type: str,
    col_names: list = None,
) -> Union[dask_cudf.DataFrame, dask_cudf.Series]:
    """Fan the replication helper out to every worker and gather results.

    Parameters
    ----------
    client : dask.distributed.Client
        Client used to submit one task per worker.
    sID : bytes
        cugraph comms session id used to look up the per-worker handle.
    dask_object : dict
        Maps worker address -> that worker's persisted partition data.
    input_type : {"dataframe", "series", "edgelist"}
        Selects which per-worker helper to run.
    col_names : list, optional (default None)
        Edgelist column names; only required when ``input_type`` is
        "edgelist", so dataframe/series callers may omit it.

    Returns
    -------
    dask_cudf.DataFrame
        Persisted distributed frame built from the per-worker results.

    Raises
    ------
    ValueError
        If ``input_type`` is not one of the supported values.
    """
    if input_type == "dataframe":
        func, extra_args = _call_plc_replicate_dataframe, ()
    elif input_type == "series":
        # Bug fix: this branch previously re-tested "dataframe", so
        # series input was never dispatched and `result` was unbound.
        func, extra_args = _call_plc_replicate_series, ()
    elif input_type == "edgelist":
        func, extra_args = _call_plc_replicate_edgelist, (col_names,)
    else:
        raise ValueError(f"Invalid input_type: {input_type!r}")
    result = [
        client.submit(
            func,
            sID,
            edata,
            *extra_args,
            workers=[w],
            allow_other_workers=False,
            pure=False,
        )
        for w, edata in dask_object.items()
    ]
    ddf = dask_cudf.from_delayed(result, verify_meta=False).persist()
    wait(ddf)
    # Release the intermediate futures once the persisted frame is ready.
    wait([r.release() for r in result])
    return ddf
def replicate_edgelist(
    edgelist_ddf: Union[dask_cudf.DataFrame, cudf.DataFrame] = None,
    source="src",
    destination="dst",
    weight=None,
    edge_id=None,
    edge_type=None,
) -> dask_cudf.DataFrame:
    """
    Replicate edges across all GPUs

    Parameters
    ----------
    edgelist_ddf: cudf.DataFrame or dask_cudf.DataFrame
        A DataFrame that contains edge information.
    source : str or array-like
        source column name or array of column names
    destination : str or array-like
        destination column name or array of column names
    weight : str, optional (default=None)
        Name of the weight column in the input dataframe.
    edge_id : str, optional (default=None)
        Name of the edge id column in the input dataframe.
    edge_type : str, optional (default=None)
        Name of the edge type column in the input dataframe.

    Returns
    -------
    df : dask_cudf.DataFrame
        A distributed dataframe where each partition contains the
        combined edgelist from all GPUs. If a cudf.DataFrame was passed
        as input, the edgelist will be replicated across all the other
        GPUs in the cluster. If a dask_cudf.DataFrame was passed as input,
        each partition will be filled with the edges of all partitions
        in the dask_cudf.DataFrame.
    """
    _client = default_client()
    # A single-GPU frame is first spread across the cluster's workers.
    if isinstance(edgelist_ddf, cudf.DataFrame):
        edgelist_ddf = dask_cudf.from_cudf(
            edgelist_ddf, npartitions=len(Comms.get_workers())
        )
    # Positional order matters: the per-worker helper indexes col_names
    # as [src, dst, weight, edge_id, edge_type].
    col_names = [source, destination]
    if weight is not None:
        col_names.append(weight)
    if edge_id is not None:
        col_names.append(edge_id)
    if edge_type is not None:
        col_names.append(edge_type)
    if not (set(col_names).issubset(set(edgelist_ddf.columns))):
        raise ValueError(
            "Invalid column names were provided: valid columns names are "
            f"{edgelist_ddf.columns}"
        )
    # Pin one partition group per worker, then dispatch the replication.
    edgelist_ddf = persist_dask_df_equal_parts_per_worker(edgelist_ddf, _client)
    edgelist_ddf = get_persisted_df_worker_map(edgelist_ddf, _client)
    ddf = _mg_call_plc_replicate(
        _client,
        Comms.get_session_id(),
        edgelist_ddf,
        "edgelist",
        col_names,
    )
    return ddf
def replicate_cudf_dataframe(cudf_dataframe):
    """
    Replicate dataframe across all GPUs

    Parameters
    ----------
    cudf_dataframe: cudf.DataFrame or dask_cudf.DataFrame
        Input dataframe; all columns must be int32/int64/float32/float64.

    Returns
    -------
    df : dask_cudf.DataFrame
        A distributed dataframe where each partition contains the
        combined dataframe from all GPUs. If a cudf.DataFrame was passed
        as input, the dataframe will be replicated across all the other
        GPUs in the cluster. If a dask_cudf.DataFrame was passed as input,
        each partition will be filled with the dataframe of all partitions
        in the dask_cudf.DataFrame.

    Raises
    ------
    TypeError
        If any column has an unsupported dtype or the input is neither a
        cudf nor a dask_cudf DataFrame.
    """
    supported_types = [np.int32, np.int64, np.float32, np.float64]
    if not all(dtype in supported_types for dtype in cudf_dataframe.dtypes):
        raise TypeError(
            "The supported types are 'int32', 'int64', 'float32', 'float64'"
        )
    _client = default_client()
    if isinstance(cudf_dataframe, dask_cudf.DataFrame):
        df = cudf_dataframe
    elif isinstance(cudf_dataframe, cudf.DataFrame):
        # Spread the single-GPU frame across the cluster's workers.
        df = dask_cudf.from_cudf(
            cudf_dataframe, npartitions=len(Comms.get_workers())
        )
    else:
        # Bug fix: previously this was a redundant `elif not isinstance(...)`
        # nested under `if not isinstance(...)`, i.e. an always-true check.
        raise TypeError(
            "The variable 'cudf_dataframe' must be of type "
            f"'cudf/dask_cudf.dataframe', got type {type(cudf_dataframe)}"
        )
    df = persist_dask_df_equal_parts_per_worker(df, _client)
    df = get_persisted_df_worker_map(df, _client)
    # Bug fix: col_names was previously omitted entirely, which raised
    # TypeError because _mg_call_plc_replicate requires it positionally.
    # It is unused for dataframe replication, so None is passed.
    ddf = _mg_call_plc_replicate(
        _client,
        Comms.get_session_id(),
        df,
        "dataframe",
        col_names=None,
    )
    return ddf
def replicate_cudf_series(cudf_series):
    """
    Replicate series across all GPUs

    Parameters
    ----------
    cudf_series: cudf.Series or dask_cudf.Series
        Input series; its dtype must be int32/int64/float32/float64.

    Returns
    -------
    series : dask_cudf.Series
        A distributed series where each partition contains the
        combined series from all GPUs. If a cudf.Series was passed
        as input, the Series will be replicated across all the other
        GPUs in the cluster. If a dask_cudf.Series was passed as input,
        each partition will be filled with the series of all partitions
        in the dask_cudf.Series.

    Raises
    ------
    TypeError
        If the dtype is unsupported or the input is neither a cudf nor a
        dask_cudf Series.
    """
    supported_types = [np.int32, np.int64, np.float32, np.float64]
    if cudf_series.dtype not in supported_types:
        raise TypeError(
            "The supported types are 'int32', 'int64', 'float32', 'float64'"
        )
    _client = default_client()
    if isinstance(cudf_series, dask_cudf.Series):
        series = cudf_series
    elif isinstance(cudf_series, cudf.Series):
        # Spread the single-GPU series across the cluster's workers.
        series = dask_cudf.from_cudf(
            cudf_series, npartitions=len(Comms.get_workers())
        )
    else:
        # Bug fix: previously this was a redundant `elif not isinstance(...)`
        # nested under `if not isinstance(...)`, i.e. an always-true check.
        raise TypeError(
            "The variable 'cudf_series' must be of type "
            f"'cudf/dask_cudf.series', got type {type(cudf_series)}"
        )
    series = persist_dask_df_equal_parts_per_worker(series, _client)
    series = get_persisted_df_worker_map(series, _client)
    # Bug fix: col_names was previously omitted entirely, which raised
    # TypeError because _mg_call_plc_replicate requires it positionally.
    # It is unused for series replication, so None is passed.
    series = _mg_call_plc_replicate(
        _client,
        Comms.get_session_id(),
        series,
        "series",
        col_names=None,
    )
    return series
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/symmetrize.py | # Copyright (c) 2019-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.structure import graph_classes as csg
import cudf
import dask_cudf
from dask.distributed import default_client
def symmetrize_df(
    df, src_name, dst_name, weight_name=None, multi=False, symmetrize=True
):
    """Symmetrize a COO edge list held in a single-GPU DataFrame.

    Every edge (u, v, data) in the input also appears as (v, u, data) in
    the output, so the graph contains both directions of each edge. Any
    extra columns (e.g. edge weights) are replicated onto the reversed
    edges.

    When both (u, v, data1) and (v, u, data2) exist with data1 != data2,
    the smaller data value is kept arbitrarily; correct the data before
    calling if that is not desired.

    Parameters
    ----------
    df : cudf.DataFrame
        COO edge list: source ids, destination ids and any edge
        properties.
    src_name : str or list
        Name(s) of the source id column(s).
    dst_name : str or list
        Name(s) of the destination id column(s).
    weight_name : string, optional (default=None)
        Name of the weight column.
    multi : bool, optional (default=False)
        True for a Multi(Di)Graph: parallel edges are kept rather than
        dropped.
    symmetrize : bool, optional (default=True)
        When False, no reverse edges are added; only duplicates are
        dropped (subject to ``multi``).

    Examples
    --------
    >>> from cugraph.structure.symmetrize import symmetrize_df
    >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> sym_df = symmetrize_df(M, '0', '1')
    """
    def _as_list(names):
        return names if isinstance(names, list) else [names]

    src_name = _as_list(src_name)
    dst_name = _as_list(dst_name)
    if weight_name is not None:
        weight_name = _as_list(weight_name)

    result = (
        _add_reverse_edges(df, src_name, dst_name, weight_name)
        if symmetrize
        else df
    )
    if multi:
        return result
    # Collapse duplicate (src, dst) rows, keeping the minimum of every
    # other column (this is the "pick the smaller data" rule above).
    vertex_col_name = src_name + dst_name
    return result.groupby(by=[*vertex_col_name], as_index=False).min()
def symmetrize_ddf(
    ddf, src_name, dst_name, weight_name=None, multi=False, symmetrize=True
):
    """
    Take a COO stored in a distributed DataFrame, and the column names of
    the source and destination columns and create a new data frame
    using the same column names that symmetrize the graph so that all
    edges appear in both directions.

    Note that if other columns exist in the data frame (e.g. edge weights)
    the other columns will also be replicated. That is, if (u,v,data)
    represents the source value (u), destination value (v) and some
    set of other columns (data) in the input data, then the output
    data will contain both (u,v,data) and (v,u,data) with matching
    data.

    If (u,v,data1) and (v,u,data2) exist in the input data where data1
    != data2 then this code will arbitrarily pick the smaller data
    element to keep, if this is not desired then the caller
    should correct the data prior to calling symmetrize.

    Parameters
    ----------
    ddf : dask_cudf.DataFrame
        Input data frame containing COO. Columns should contain source
        ids, destination ids and any properties associated with the
        edges.
    src_name : str or list
        Name(s) of the column(s) in the data frame containing the source ids
    dst_name : str or list
        Name(s) of the column(s) in the data frame containing
        the destination ids
    weight_name : string, optional (default=None)
        Name of the column in the data frame containing the weight ids
    multi : bool, optional (default=False)
        Set to True if graph is a Multi(Di)Graph. This allows multiple
        edges instead of dropping them.
    symmetrize : bool, optional (default=True)
        Default is True to perform symmetrization. If False only duplicate
        edges are dropped.

    Examples
    --------
    >>> # import cugraph.dask as dcg
    >>> # from cugraph.structure.symmetrize import symmetrize_ddf
    >>> # Init a DASK Cluster
    >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
    >>> # chunksize = dcg.get_chunksize(datasets / 'karate.csv')
    >>> # ddf = dask_cudf.read_csv(datasets/'karate.csv', chunksize=chunksize,
    >>> #                          delimiter=' ',
    >>> #                          names=['src', 'dst', 'weight'],
    >>> #                          dtype=['int32', 'int32', 'float32'])
    >>> # sym_ddf = symmetrize_ddf(ddf, "src", "dst", "weight")
    """
    # FIXME: Uncomment out the above (broken) example
    _client = default_client()
    workers = _client.scheduler_info()["workers"]
    # Normalize column-name arguments to lists.
    if not isinstance(src_name, list):
        src_name = [src_name]
    if not isinstance(dst_name, list):
        dst_name = [dst_name]
    if weight_name is not None and not isinstance(weight_name, list):
        weight_name = [weight_name]
    if symmetrize:
        # Reverse edges are added per partition; duplicates across
        # partitions are handled by the dedup step below.
        result = ddf.map_partitions(_add_reverse_edges, src_name, dst_name, weight_name)
    else:
        result = ddf
    if multi:
        return result
    else:
        vertex_col_name = src_name + dst_name
        result = _memory_efficient_drop_duplicates(
            result, vertex_col_name, len(workers)
        )
    return result
def symmetrize(
    input_df,
    source_col_name,
    dest_col_name,
    value_col_name=None,
    multi=False,
    symmetrize=True,
):
    """
    Take a dataframe of source destination pairs along with associated
    values stored in a single GPU or distributed
    create a COO set of source destination pairs along with values where
    all edges exist in both directions.

    Return from this call will be a COO stored as two/three cudf/dask_cudf
    Series/Dataframe -the symmetrized source column and the symmetrized dest
    column, along with an optional cudf/dask_cudf Series/DataFrame containing
    the associated values (only if the values are passed in).

    Parameters
    ----------
    input_df : cudf.DataFrame or dask_cudf.DataFrame
        The edgelist as a cudf.DataFrame or dask_cudf.DataFrame
    source_col_name : str or list
        source column name.
    dest_col_name : str or list
        destination column name.
    value_col_name : str or None
        weights column name.
    multi : bool, optional (default=False)
        Set to True if graph is a Multi(Di)Graph. This allows multiple
        edges instead of dropping them.
    symmetrize : bool, optional
        Default is True to perform symmetrization. If False only duplicate
        edges are dropped.

    Examples
    --------
    >>> from cugraph.structure.symmetrize import symmetrize
    >>> # Download dataset from https://github.com/rapidsai/cugraph/datasets/..
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> df = cudf.DataFrame()
    >>> df['sources'] = cudf.Series(M['0'])
    >>> df['destinations'] = cudf.Series(M['1'])
    >>> df['values'] = cudf.Series(M['2'])
    >>> src, dst, val = symmetrize(df, 'sources', 'destinations', 'values')
    """
    # FIXME: Redundant check that should be done at the graph creation
    if "edge_id" in input_df.columns and symmetrize:
        raise ValueError("Edge IDs are not supported on undirected graphs")

    csg.null_check(input_df[source_col_name])
    csg.null_check(input_df[dest_col_name])

    # Dispatch to the distributed or single-GPU implementation.
    if isinstance(input_df, dask_cudf.DataFrame):
        output_df = symmetrize_ddf(
            input_df,
            source_col_name,
            dest_col_name,
            value_col_name,
            multi,
            symmetrize,
        )
    else:
        output_df = symmetrize_df(
            input_df,
            source_col_name,
            dest_col_name,
            value_col_name,
            multi,
            symmetrize,
        )
    if value_col_name is not None:
        # value_col_name may select a single Series or (when it is a list
        # of names) a DataFrame; return the matching shape.
        value_col = output_df[value_col_name]
        if isinstance(value_col, (cudf.Series, dask_cudf.Series)):
            return (
                output_df[source_col_name],
                output_df[dest_col_name],
                output_df[value_col_name],
            )
        elif isinstance(value_col, (cudf.DataFrame, dask_cudf.DataFrame)):
            return (
                output_df[source_col_name],
                output_df[dest_col_name],
                output_df[value_col.columns],
            )
    return output_df[source_col_name], output_df[dest_col_name]
def _add_reverse_edges(df, src_name, dst_name, weight_name):
    """Append a reversed copy of every edge to ``df``.

    Parameters
    ----------
    df : cudf.DataFrame or dask_cudf.DataFrame partition
    src_name : list of source column names
    dst_name : list of destination column names
    weight_name : list of weight column names, or None/empty
    """
    # Select dst/src (swapped) plus any weights, then rename the swapped
    # columns back to src/dst so the reversed rows line up with df.
    if weight_name:
        selected = [*dst_name, *src_name, *weight_name]
        renamed = [*src_name, *dst_name, *weight_name]
    else:
        selected = [*dst_name, *src_name]
        renamed = [*src_name, *dst_name]
    reversed_df = df[selected]
    reversed_df.columns = renamed
    return cudf.concat([df, reversed_df], ignore_index=True)
def _memory_efficient_drop_duplicates(ddf, vertex_col_name, num_workers):
    """Remove duplicate edges from ``ddf`` using a groupby-min.

    drop_duplicates has a 5x+ overhead and does not seem to be working
    as expected (TODO: triage an MRE), so duplicates are collapsed with
    a groupby over the vertex columns instead, taking the minimum of
    every other column.
    """
    n_parts = num_workers * 2
    deduped = ddf.reset_index(drop=True).repartition(npartitions=n_parts)
    return deduped.groupby(by=[*vertex_col_name], as_index=False).min(
        split_out=n_parts
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_implementation/simpleGraph.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph.structure import graph_primtypes_wrapper
from cugraph.structure.graph_primtypes_wrapper import Direction
from cugraph.structure.symmetrize import symmetrize
from cugraph.structure.number_map import NumberMap
import cugraph.dask.common.mg_utils as mg_utils
import cudf
import dask_cudf
import cugraph.dask.comms.comms as Comms
import pandas as pd
import numpy as np
import warnings
from cugraph.dask.structure import replication
from typing import Union, Dict
from pylibcugraph import (
get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors,
select_random_vertices as pylibcugraph_select_random_vertices,
)
from pylibcugraph import (
ResourceHandle,
GraphProperties,
SGGraph,
)
# FIXME: Change to consistent camel case naming
class simpleGraphImpl:
edgeWeightCol = "weights"
edgeIdCol = "edge_id"
edgeTypeCol = "edge_type"
srcCol = "src"
dstCol = "dst"
class EdgeList:
    """Edge-list (COO) container used by simpleGraphImpl.

    Builds a cudf.DataFrame with source/destination columns and, when
    provided, weight / edge-id / edge-type columns. ``self.weights``
    records whether a weight column is present.
    """

    def __init__(
        self,
        source: cudf.Series,
        destination: cudf.Series,
        edge_attr: Union[cudf.DataFrame, Dict[str, cudf.DataFrame]] = None,
    ):
        self.edgelist_df = cudf.DataFrame()
        self.edgelist_df[simpleGraphImpl.srcCol] = source
        self.edgelist_df[simpleGraphImpl.dstCol] = destination
        self.weights = False
        if edge_attr is not None:
            if isinstance(edge_attr, dict):
                # dict form: keyed by the canonical weight/id/type column
                # names; entries may be None and are then skipped.
                if edge_attr[simpleGraphImpl.edgeWeightCol] is not None:
                    self.weights = True

                for ea in [
                    simpleGraphImpl.edgeIdCol,
                    simpleGraphImpl.edgeTypeCol,
                    simpleGraphImpl.edgeWeightCol,
                ]:
                    if edge_attr[ea] is not None:
                        self.edgelist_df[ea] = edge_attr[ea]
            else:
                # Non-dict form: edge_attr is the weight data itself.
                self.weights = True
                self.edgelist_df[simpleGraphImpl.edgeWeightCol] = edge_attr
class AdjList:
    """Adjacency-list (CSR) container: offsets, indices and optional
    weights."""

    def __init__(self, offsets, indices, value=None):
        # `value` should be a dataframe when there are multiple weights.
        self.weights = value
        self.indices = indices
        self.offsets = offsets
class transposedAdjList:
    """Transposed adjacency (CSC) container.

    NOTE(review): this class does not inherit from AdjList; it invokes
    AdjList.__init__ directly, which assigns the same offsets/indices/
    weights attributes onto this instance — confirm this is intentional
    rather than a missing base class.
    """

    def __init__(self, offsets, indices, value=None):
        simpleGraphImpl.AdjList.__init__(self, offsets, indices, value)
class Properties:
    """Bag of graph-level flags and cached counts for simpleGraphImpl."""

    def __init__(self, properties):
        # `multi_edge` may be absent on the supplied properties object,
        # in which case it defaults to False.
        self.multi_edge = getattr(properties, "multi_edge", False)
        self.directed = properties.directed
        # State populated later during graph construction.
        self.renumbered = False
        self.store_transposed = False
        self.weighted = False
        # Lazily-computed values start out unknown.
        self.self_loop = None
        self.isolated_vertices = None
        self.node_count = None
        self.edge_count = None
def __init__(self, properties):
    """Initialize an empty single-GPU graph implementation.

    Parameters
    ----------
    properties : GraphProperties
        Graph-level flags (directed, multi_edge, ...) copied into a
        simpleGraphImpl.Properties bag.
    """
    # Structure: edge list / adjacency representations are built lazily.
    self.edgelist = None
    self.input_df = None
    self.adjlist = None
    self.transposedadjlist = None
    self.renumber_map = None
    self.properties = simpleGraphImpl.Properties(properties)
    self._nodes = {}
    # TODO: Move to new batch class
    # MG - Batch
    self.batch_enabled = False
    self.batch_edgelists = None
    self.batch_adjlists = None
    self.batch_transposed_adjlists = None
    # Column-name bookkeeping filled in when an edgelist is loaded.
    self.source_columns = None
    self.destination_columns = None
    self.vertex_columns = None
    self.weight_column = None
# Functions
# FIXME: Change to public function
# FIXME: Make function more modular
# edge_attr: None, weight, or (weight, id, type)
def __from_edgelist(
    self,
    input_df,
    source="source",
    destination="destination",
    edge_attr=None,
    weight=None,
    edge_id=None,
    edge_type=None,
    renumber=True,
    legacy_renum_only=True,
    store_transposed=False,
):
    """
    Populate this graph implementation from an edge-list dataframe.

    Validates the requested columns, optionally renumbers vertex ids,
    symmetrizes the edge list for undirected graphs, and finally builds
    the pylibcugraph (PLC) graph object.

    Parameters
    ----------
    input_df : cudf.DataFrame or dask_cudf.DataFrame
        The edge list; dask_cudf input is gathered onto a single GPU.
    source, destination : str or list of str
        Endpoint column name(s) in ``input_df``.
    edge_attr : str or list, optional
        A single weight column name, or a list of
        [weight, edge_id, edge_type] column names.  Mutually exclusive
        with the explicit weight/edge_id/edge_type parameters.
    weight, edge_id, edge_type : str, optional
        Individual edge-property column names.
    renumber : bool
        Map external vertex ids to a contiguous internal range;
        required for multi-column or non-integer ids.
    legacy_renum_only : bool
        Deprecated; retained for backward compatibility.
    store_transposed : bool
        Whether the PLC graph is stored transposed.
    """
    if legacy_renum_only:
        warning_msg = (
            "The parameter 'legacy_renum_only' is deprecated and will be removed."
        )
        warnings.warn(
            warning_msg,
        )

    # Verify column names present in input DataFrame
    s_col = source
    d_col = destination
    if not isinstance(s_col, list):
        s_col = [s_col]
    if not isinstance(d_col, list):
        d_col = [d_col]
    if not (
        set(s_col).issubset(set(input_df.columns))
        and set(d_col).issubset(set(input_df.columns))
    ):
        raise ValueError(
            "source column names and/or destination column "
            "names not found in input. Recheck the source and "
            "destination parameters"
        )
    df_columns = s_col + d_col
    self.vertex_columns = df_columns.copy()

    if edge_attr is not None:
        # edge_attr supersedes the individual property parameters.
        if weight is not None or edge_id is not None or edge_type is not None:
            raise ValueError(
                "If specifying edge_attr, cannot specify weight/edge_id/edge_type"
            )
        if isinstance(edge_attr, str):
            weight = edge_attr
            edge_attr = [weight]
        if not (set(edge_attr).issubset(set(input_df.columns))):
            raise ValueError(
                f"edge_attr column {edge_attr} not found in input."
                "Recheck the edge_attr parameter"
            )
        self.properties.weighted = True

        # Either weight only, or the full [weight, id, type] triple.
        if len(edge_attr) != 1 and len(edge_attr) != 3:
            raise ValueError(
                f"Invalid number of edge attributes " f"passed. {edge_attr}"
            )

        # The symmetrize step may add additional edges with unknown
        # ids and types for an undirected graph. Therefore, only
        # directed graphs may be used with ids and types.
        if len(edge_attr) == 3:
            if not self.properties.directed:
                raise ValueError(
                    "User-provided edge ids and edge "
                    "types are not permitted for an "
                    "undirected graph."
                )

            weight, edge_id, edge_type = edge_attr
    else:
        # Collect whichever individual properties were supplied.  Note
        # that edge_attr ends up as a (possibly empty) list here, never
        # None, so the `edge_attr is not None` branch below also runs
        # for unweighted graphs.
        edge_attr = []
        if weight is not None:
            edge_attr.append(weight)
            self.properties.weighted = True
        if edge_id is not None:
            edge_attr.append(edge_id)
        if edge_type is not None:
            edge_attr.append(edge_type)

    df_columns += edge_attr
    input_df = input_df[df_columns]
    # FIXME: check if the consolidated graph fits on the
    # device before gathering all the edge lists

    # Consolidation: bring the edge list onto a single GPU.  The
    # 2147483100 guard keeps lengths below the int32 limit used by
    # the C++ layer.
    if isinstance(input_df, cudf.DataFrame):
        if len(input_df[source]) > 2147483100:
            raise ValueError(
                "cudf dataFrame edge list is too big to fit in a single GPU"
            )
        elist = input_df
    elif isinstance(input_df, dask_cudf.DataFrame):
        if len(input_df[source]) > 2147483100:
            raise ValueError(
                "dask_cudf dataFrame edge list is too big to fit in a single GPU"
            )
        elist = input_df.compute().reset_index(drop=True)
    else:
        raise TypeError("input should be a cudf.DataFrame or a dask_cudf dataFrame")

    # initial, unmodified input dataframe.
    self.input_df = elist
    self.weight_column = weight
    self.source_columns = source
    self.destination_columns = destination

    # Renumbering
    self.renumber_map = None
    self.store_transposed = store_transposed
    if renumber:
        # FIXME: Should SG do lazy evaluation like MG?
        elist, renumber_map = NumberMap.renumber(
            elist,
            source,
            destination,
            store_transposed=False,
            legacy_renum_only=legacy_renum_only,
        )
        source = renumber_map.renumbered_src_col_name
        destination = renumber_map.renumbered_dst_col_name
        # Use renumber_map to figure out if the python renumbering occured
        self.properties.renumbered = renumber_map.is_renumbered

        self.renumber_map = renumber_map
        self.renumber_map.implementation.src_col_names = simpleGraphImpl.srcCol
        self.renumber_map.implementation.dst_col_names = simpleGraphImpl.dstCol
    else:
        # Without renumbering, only single-column integer ids are valid.
        if type(source) is list and type(destination) is list:
            raise ValueError("set renumber to True for multi column ids")
        elif elist[source].dtype not in [np.int32, np.int64] or elist[
            destination
        ].dtype not in [np.int32, np.int64]:
            raise ValueError("set renumber to True for non integer columns ids")

    # The dataframe will be symmetrized iff the graph is undirected
    # otherwise the inital dataframe will be returned. Duplicated edges
    # will be dropped unless the graph is a MultiGraph(Not Implemented yet)
    # TODO: Update Symmetrize to work on Graph and/or DataFrame
    if edge_attr is not None:
        source_col, dest_col, value_col = symmetrize(
            elist,
            source,
            destination,
            edge_attr,
            multi=self.properties.multi_edge,
            symmetrize=not self.properties.directed,
        )

        if isinstance(value_col, cudf.DataFrame):
            value_dict = {}
            for i in value_col.columns:
                value_dict[i] = value_col[i]
            value_col = value_dict
    else:
        value_col = None
        source_col, dest_col = symmetrize(
            elist,
            source,
            destination,
            multi=self.properties.multi_edge,
            symmetrize=not self.properties.directed,
        )

    if isinstance(value_col, dict):
        # Re-key the user's column names to the canonical internal ones.
        value_col = {
            self.edgeWeightCol: value_col[weight] if weight in value_col else None,
            self.edgeIdCol: value_col[edge_id] if edge_id in value_col else None,
            self.edgeTypeCol: value_col[edge_type]
            if edge_type in value_col
            else None,
        }

    self.edgelist = simpleGraphImpl.EdgeList(source_col, dest_col, value_col)

    if self.batch_enabled:
        self._replicate_edgelist()

    self._make_plc_graph(
        value_col=value_col, store_transposed=store_transposed, renumber=renumber
    )
def to_pandas_edgelist(
    self,
    source="src",
    destination="dst",
    weight="weights",
):
    """
    Returns the graph edge list as a Pandas DataFrame.

    Parameters
    ----------
    source : str or array-like, optional (default='src')
        source column name or array of column names
    destination : str or array-like, optional (default='dst')
        destination column name or array of column names
    weight : str or array-like, optional (default='weights')
        weight column name or array of column names

    Returns
    -------
    df : pandas.DataFrame
    """
    gdf = self.view_edge_list()
    if self.properties.weighted:
        # NOTE(review): view_edge_list() renames the internal weight
        # column to self.weight_column, so the literal "weight" key
        # below may not match any column — confirm against callers.
        gdf.rename(
            columns={
                simpleGraphImpl.srcCol: source,
                simpleGraphImpl.dstCol: destination,
                "weight": weight,
            },
            inplace=True,
        )
    else:
        gdf.rename(
            columns={
                simpleGraphImpl.srcCol: source,
                simpleGraphImpl.dstCol: destination,
            },
            inplace=True,
        )
    return gdf.to_pandas()
def to_pandas_adjacency(self):
    """
    Return the graph adjacency matrix as a Pandas DataFrame whose row
    and column labels are the graph's vertex ids.
    """
    labels = self.nodes().values_host.tolist()
    # Label both axes with the vertex ids so lookups can use external
    # vertex ids directly.
    return pd.DataFrame(self.to_numpy_array(), index=labels, columns=labels)
def to_numpy_array(self):
    """
    Return the graph adjacency matrix as a dense NumPy array.

    Entry [i, j] holds the weight of edge (i, j), where i and j are the
    index labels of the endpoints in ``self.nodes()``; absent edges are
    0.0.  Assumes single-column vertices (``nodes()`` returns a Series
    — the multi-column DataFrame case is not supported here).
    """
    nlen = self.number_of_nodes()
    elen = self.number_of_edges()
    df = self.edgelist.edgelist_df
    np_array = np.full((nlen, nlen), 0.0)
    nodes = self.nodes()

    # Build the vertex -> index-label mapping once (O(V)) instead of
    # scanning the node Series for every edge (previously O(E * V)),
    # and pull each column to host once instead of a device->host
    # transfer per element.
    node_to_idx = {v: i for i, v in nodes.to_pandas().items()}
    srcs = df[simpleGraphImpl.srcCol].to_pandas().tolist()
    dsts = df[simpleGraphImpl.dstCol].to_pandas().tolist()
    wgts = df[self.edgeWeightCol].to_pandas().tolist()

    # Only the first `elen` rows are consumed, matching the original
    # behavior for undirected (symmetrized) edge lists.
    for i in range(elen):
        np_array[node_to_idx[srcs[i]], node_to_idx[dsts[i]]] = wgts[i]
    return np_array
def to_numpy_matrix(self):
    """Return the graph adjacency matrix as a ``numpy.matrix``."""
    # Thin wrapper: reuse the array form and change the container type.
    return np.asmatrix(self.to_numpy_array())
def view_edge_list(self):
    """
    Display the edge list. Compute it if needed.

    NOTE: If the graph is of type Graph() then the displayed undirected
    edges are the same as displayed by networkx Graph(), but the direction
    could be different i.e. an edge displayed by cugraph as (src, dst)
    could be displayed as (dst, src) by networkx.

    cugraph.Graph stores symmetrized edgelist internally. For displaying
    undirected edgelist for a Graph the upper triangular matrix of the
    symmetrized edgelist is returned.

    networkx.Graph renumbers the input and stores the upper triangle of
    this renumbered input. Since the internal renumbering of networkx and
    cugraph is different, the upper triangular matrix of networkx
    renumbered input may not be the same as cugraph's upper triangular
    matrix of the symmetrized edgelist. Hence the displayed source and
    destination pairs in both will represent the same edge but node values
    could be swapped.

    Returns
    -------
    df : cudf.DataFrame
        This cudf.DataFrame wraps source, destination and weight

        df[src] : cudf.Series
            contains the source index for each edge

        df[dst] : cudf.Series
            contains the destination index for each edge

        df[weight] : cudf.Series
            Column is only present for weighted Graph,
            then containing the weight value for each edge
    """
    if self.edgelist is None:
        # Build the edge list from the CSR/CSC representation on demand.
        src, dst, weights = graph_primtypes_wrapper.view_edge_list(self)
        self.edgelist = self.EdgeList(src, dst, weights)

    srcCol = self.source_columns
    dstCol = self.destination_columns

    """
    Only use the initial input dataframe if the graph is directed with:
    1) single vertex column names with integer vertex type
    2) list of vertex column names of size 1 with integer vertex type
    """
    use_initial_input_df = True
    if self.input_df is not None:
        if type(srcCol) is list and type(dstCol) is list:
            if len(srcCol) == 1:
                srcCol = srcCol[0]
                dstCol = dstCol[0]
                if self.input_df[srcCol].dtype not in [
                    np.int32,
                    np.int64,
                ] or self.input_df[dstCol].dtype not in [np.int32, np.int64]:
                    # hypergraph case
                    use_initial_input_df = False
            else:
                use_initial_input_df = False
        elif self.input_df[srcCol].dtype not in [
            np.int32,
            np.int64,
        ] or self.input_df[dstCol].dtype not in [np.int32, np.int64]:
            use_initial_input_df = False
    else:
        use_initial_input_df = False

    if use_initial_input_df and self.properties.directed:
        # Directed + simple integer ids: the user's dataframe is
        # already an exact view of the edge list.
        edgelist_df = self.input_df
    else:
        edgelist_df = self.edgelist.edgelist_df
        if srcCol is None and dstCol is None:
            srcCol = simpleGraphImpl.srcCol
            dstCol = simpleGraphImpl.dstCol

        if use_initial_input_df and not self.properties.directed:
            # unrenumber before extracting the upper triangular part
            # case when the vertex column name is of size 1
            if self.properties.renumbered:
                edgelist_df = self.renumber_map.unrenumber(
                    edgelist_df, simpleGraphImpl.srcCol
                )
                edgelist_df = self.renumber_map.unrenumber(
                    edgelist_df, simpleGraphImpl.dstCol
                )
                edgelist_df = edgelist_df.rename(
                    columns=self.renumber_map.internal_to_external_col_names
                )
                # extract the upper triangular part
                edgelist_df = edgelist_df[edgelist_df[srcCol] <= edgelist_df[dstCol]]
            else:
                edgelist_df = edgelist_df[
                    edgelist_df[simpleGraphImpl.srcCol]
                    <= edgelist_df[simpleGraphImpl.dstCol]
                ]
        elif not use_initial_input_df and self.properties.renumbered:
            # Do not unrenumber the vertices if the initial input df was used
            if not self.properties.directed:
                # Keep a single direction of each symmetrized edge.
                edgelist_df = edgelist_df[
                    edgelist_df[simpleGraphImpl.srcCol]
                    <= edgelist_df[simpleGraphImpl.dstCol]
                ]
            edgelist_df = self.renumber_map.unrenumber(
                edgelist_df, simpleGraphImpl.srcCol
            )
            edgelist_df = self.renumber_map.unrenumber(
                edgelist_df, simpleGraphImpl.dstCol
            )
            edgelist_df = edgelist_df.rename(
                columns=self.renumber_map.internal_to_external_col_names
            )

    if self.vertex_columns is not None and len(self.vertex_columns) == 2:
        # single column vertices internally renamed to 'simpleGraphImpl.srcCol'
        # and 'simpleGraphImpl.dstCol'.
        if not set(self.vertex_columns).issubset(set(edgelist_df.columns)):
            # Get the initial column names passed by the user.
            if srcCol is not None and dstCol is not None:
                edgelist_df = edgelist_df.rename(
                    columns={
                        simpleGraphImpl.srcCol: srcCol,
                        simpleGraphImpl.dstCol: dstCol,
                    }
                )

    # FIXME: When renumbered, the MG API uses renumbered col names which
    # is not consistant with the SG API.
    self.properties.edge_count = len(edgelist_df)

    # Present the weight column under the user's original name.
    wgtCol = simpleGraphImpl.edgeWeightCol
    edgelist_df = edgelist_df.rename(
        columns={wgtCol: self.weight_column}
    ).reset_index(drop=True)

    return edgelist_df
def delete_edge_list(self):
    """
    Delete the edge list.
    """
    # decrease reference count to free memory if the referenced objects are
    # no longer used.
    self.edgelist = None
def __from_adjlist(
    self,
    offset_col,
    index_col,
    value_col=None,
    renumber=True,
    store_transposed=False,
):
    """
    Populate this graph from a CSR adjacency list.

    Parameters
    ----------
    offset_col : cudf.Series
        CSR row offsets (size V + 1).
    index_col : cudf.Series
        CSR column indices (size E).
    value_col : cudf.Series, optional
        Edge weights; marks the graph as weighted when given.
    renumber : bool
        Forwarded to the PLC graph constructor.
    store_transposed : bool
        Whether the PLC graph is stored transposed.
    """
    self.adjlist = simpleGraphImpl.AdjList(offset_col, index_col, value_col)

    if value_col is not None:
        self.properties.weighted = True
    self._make_plc_graph(
        value_col=value_col, store_transposed=store_transposed, renumber=renumber
    )
    if self.batch_enabled:
        self._replicate_adjlist()
def view_adj_list(self):
    """
    Display the adjacency list. Compute it if needed.

    Returns
    -------
    offset_col : cudf.Series
        This cudf.Series wraps a gdf_column of size V + 1 (V: number of
        vertices).
        The gdf column contains the offsets for the vertices in this graph.
        Offsets are in the range [0, E] (E: number of edges).

    index_col : cudf.Series
        This cudf.Series wraps a gdf_column of size E (E: number of edges).
        The gdf column contains the destination index for each edge.
        Destination indices are in the range [0, V) (V: number of
        vertices).

    value_col : cudf.Series or ``None``
        This pointer is ``None`` for unweighted graphs.
        For weighted graphs, this cudf.Series wraps a gdf_column of size E
        (E: number of edges).
        The gdf column contains the weight value for each edge.
        The expected type of the gdf_column element is floating point
        number.
    """
    if self.adjlist is None:
        if self.transposedadjlist is not None and self.properties.directed is False:
            # For an undirected graph CSR == CSC, so reuse the existing
            # transposed representation instead of recomputing.
            off, ind, vals = (
                self.transposedadjlist.offsets,
                self.transposedadjlist.indices,
                self.transposedadjlist.weights,
            )
        else:
            off, ind, vals = graph_primtypes_wrapper.view_adj_list(self)
        self.adjlist = self.AdjList(off, ind, vals)

        if self.batch_enabled:
            self._replicate_adjlist()

    return self.adjlist.offsets, self.adjlist.indices, self.adjlist.weights
def view_transposed_adj_list(self):
    """
    Display the transposed adjacency list. Compute it if needed.

    Returns
    -------
    offset_col : cudf.Series
        This cudf.Series wraps a gdf_column of size V + 1 (V: number of
        vertices).
        The gdf column contains the offsets for the vertices in this graph.
        Offsets are in the range [0, E] (E: number of edges).

    index_col : cudf.Series
        This cudf.Series wraps a gdf_column of size E (E: number of edges).
        The gdf column contains the destination index for each edge.
        Destination indices are in the range [0, V) (V: number of
        vertices).

    value_col : cudf.Series or ``None``
        This pointer is ``None`` for unweighted graphs.
        For weighted graphs, this cudf.Series wraps a gdf_column of size E
        (E: number of edges).
        The gdf column contains the weight value for each edge.
        The expected type of the gdf_column element is floating point
        number.
    """
    if self.transposedadjlist is None:
        if self.adjlist is not None and self.properties.directed is False:
            # Undirected graphs are symmetric: CSC equals the existing CSR.
            off, ind, vals = (
                self.adjlist.offsets,
                self.adjlist.indices,
                self.adjlist.weights,
            )
        else:
            (
                off,
                ind,
                vals,
            ) = graph_primtypes_wrapper.view_transposed_adj_list(self)
        self.transposedadjlist = self.transposedAdjList(off, ind, vals)

        if self.batch_enabled:
            self._replicate_transposed_adjlist()

    return (
        self.transposedadjlist.offsets,
        self.transposedadjlist.indices,
        self.transposedadjlist.weights,
    )
def delete_adj_list(self):
    """
    Delete the adjacency list.
    """
    # Drop the reference so the memory can be reclaimed once unused.
    self.adjlist = None
# FIXME: Update batch workflow and refactor to suitable file
def enable_batch(self):
    """
    Enable multi-GPU batch processing by replicating every existing
    graph representation across the Dask cluster workers.

    Raises
    ------
    RuntimeError
        If no Dask client or communicator is available.
    """
    client = mg_utils.get_client()
    comms = Comms.get_comms()
    if client is None or comms is None:
        raise RuntimeError(
            "MG Batch needs a Dask Client and the "
            "Communicator needs to be initialized."
        )
    self.batch_enabled = True
    # Replicate whichever representations already exist and have not
    # been replicated yet.
    if self.edgelist is not None and self.batch_edgelists is None:
        self._replicate_edgelist()
    if self.adjlist is not None and self.batch_adjlists is None:
        self._replicate_adjlist()
    if (
        self.transposedadjlist is not None
        and self.batch_transposed_adjlists is None
    ):
        self._replicate_transposed_adjlist()
def _replicate_edgelist(self):
    """
    Replicate the edge-list dataframe to every Dask worker for batch
    processing; stores the resulting per-worker futures.
    """
    client = mg_utils.get_client()
    comms = Comms.get_comms()

    # FIXME: There might be a better way to control it
    if client is None:
        return

    work_futures = replication.replicate_cudf_dataframe(
        self.edgelist.edgelist_df, client=client, comms=comms
    )

    self.batch_edgelists = work_futures
def _replicate_adjlist(self):
    """
    Replicate the CSR adjacency list (offsets, indices, and optional
    weights) to every Dask worker, storing per-worker futures as
    ``{worker: [offsets, indices, weights]}``.
    """
    client = mg_utils.get_client()
    comms = Comms.get_comms()

    # FIXME: There might be a better way to control it
    if client is None:
        return

    weights = None
    offsets_futures = replication.replicate_cudf_series(
        self.adjlist.offsets, client=client, comms=comms
    )
    indices_futures = replication.replicate_cudf_series(
        self.adjlist.indices, client=client, comms=comms
    )

    if self.adjlist.weights is not None:
        # NOTE(review): unlike the offsets/indices calls above, this call
        # does not pass client/comms — confirm whether that is intended.
        weights = replication.replicate_cudf_series(self.adjlist.weights)
    else:
        weights = {worker: None for worker in offsets_futures}

    merged_futures = {
        worker: [
            offsets_futures[worker],
            indices_futures[worker],
            weights[worker],
        ]
        for worker in offsets_futures
    }
    self.batch_adjlists = merged_futures
# FIXME: Not implemented yet
def _replicate_transposed_adjlist(self):
    # Placeholder: marks replication as done without distributing
    # anything (see the FIXME above).
    self.batch_transposed_adjlists = True
def get_two_hop_neighbors(self, start_vertices=None):
    """
    Compute vertex pairs that are two hops apart. The resulting pairs are
    sorted before returning.

    Parameters
    ----------
    start_vertices : int, list, or cudf.Series, optional
        Vertices to start from; all vertices when None.

    Returns
    -------
    df : cudf.DataFrame
        df[first] : cudf.Series
            the first vertex id of a pair, if an external vertex id
            is defined by only one column
        df[second] : cudf.Series
            the second vertex id of a pair, if an external vertex id
            is defined by only one column
    """
    if isinstance(start_vertices, int):
        start_vertices = [start_vertices]

    if isinstance(start_vertices, list):
        start_vertices = cudf.Series(start_vertices)

    if self.properties.renumbered is True:
        if start_vertices is not None:
            start_vertices = self.renumber_map.to_internal_vertex_id(start_vertices)
            # Use the class-level column-name constant instead of a
            # hard-coded "src" literal, for consistency with the rest
            # of this file (simpleGraphImpl.srcCol == "src").
            start_vertices_type = self.edgelist.edgelist_df[
                simpleGraphImpl.srcCol
            ].dtype
            start_vertices = start_vertices.astype(start_vertices_type)

    do_expensive_check = False
    first, second = pylibcugraph_get_two_hop_neighbors(
        resource_handle=ResourceHandle(),
        graph=self._plc_graph,
        start_vertices=start_vertices,
        do_expensive_check=do_expensive_check,
    )

    df = cudf.DataFrame()
    df["first"] = first
    df["second"] = second

    if self.properties.renumbered is True:
        # Map the internal ids back to the caller's external ids.
        df = self.renumber_map.unrenumber(df, "first")
        df = self.renumber_map.unrenumber(df, "second")

    return df
def select_random_vertices(
    self,
    random_state: int = None,
    num_vertices: int = None,
) -> Union[cudf.Series, cudf.DataFrame]:
    """
    Select random vertices from the graph

    Parameters
    ----------
    random_state : int , optional(default=None)
        Random state to use when generating samples. Optional argument,
        defaults to a hash of process id, time, and hostname.

    num_vertices : int, optional(default=None)
        Number of vertices to sample. If None, all vertices will be selected

    Returns
    -------
    return random vertices from the graph as a cudf
    """
    vertices = pylibcugraph_select_random_vertices(
        resource_handle=ResourceHandle(),
        graph=self._plc_graph,
        random_state=random_state,
        num_vertices=num_vertices,
    )
    vertices = cudf.Series(vertices)
    if self.properties.renumbered is True:
        # Round-trip through a dataframe so unrenumber() can map the
        # internal ids back to the caller's external ids.
        df_ = cudf.DataFrame()
        df_["vertex"] = vertices
        df_ = self.renumber_map.unrenumber(df_, "vertex")
        vertices = df_["vertex"]

    return vertices
def number_of_vertices(self):
    """
    Return the number of vertices, computing and caching the count on
    first use.

    Raises
    ------
    RuntimeError
        If no representation (adjacency or edge list) exists yet.
    """
    if self.properties.node_count is not None:
        return self.properties.node_count

    if self.adjlist is not None:
        # CSR offsets carry one extra trailing entry.
        count = len(self.adjlist.offsets) - 1
    elif self.transposedadjlist is not None:
        count = len(self.transposedadjlist.offsets) - 1
    elif self.edgelist is not None:
        count = len(self.nodes())
    else:
        raise RuntimeError("Graph is Empty")

    self.properties.node_count = count
    return count
def number_of_nodes(self):
    """
    NetworkX-compatible alias of :meth:`number_of_vertices`.
    """
    return self.number_of_vertices()
def number_of_edges(self, directed_edges=False):
    """
    Return the number of edges in the graph, caching the result.

    Parameters
    ----------
    directed_edges : bool
        When True and an edge list exists, report the raw (possibly
        symmetrized) stored edge count instead of the logical count.
    """
    # TODO: Move to Outer graphs?
    if directed_edges and self.edgelist is not None:
        return len(self.edgelist.edgelist_df)

    if self.properties.edge_count is not None:
        return self.properties.edge_count

    if self.edgelist is not None:
        edf = self.edgelist.edgelist_df
        if self.properties.directed is False:
            # Symmetrized storage holds each undirected edge twice;
            # count only one direction (self-loops appear once).
            count = len(
                edf[
                    edf[simpleGraphImpl.srcCol]
                    >= edf[simpleGraphImpl.dstCol]
                ]
            )
        else:
            count = len(edf)
    elif self.adjlist is not None:
        count = len(self.adjlist.indices)
    elif self.transposedadjlist is not None:
        count = len(self.transposedadjlist.indices)
    else:
        raise ValueError("Graph is Empty")

    self.properties.edge_count = count
    return count
def in_degree(self, vertex_subset=None):
    """
    Compute the in-degree of each vertex, i.e. the number of edges
    pointing into it.  Results cover every vertex unless
    ``vertex_subset`` restricts them.

    Parameters
    ----------
    vertex_subset : cudf.Series or iterable container, optional
        Vertices whose in-degree should be reported.  All vertices
        when not set.

    Returns
    -------
    df : cudf.DataFrame
        Columns ``vertex`` (the vertex ids, identical to vertex_subset
        when given) and ``degree`` (the computed in-degree).

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, '0', '1')
    >>> df = G.in_degree([0,9,12])

    """
    # Delegate to the shared degree helper, restricted to incoming edges.
    return self._degree(vertex_subset, direction=Direction.IN)
def out_degree(self, vertex_subset=None):
    """
    Compute the out-degree of each vertex, i.e. the number of edges
    pointing out of it.  Results cover every vertex unless
    ``vertex_subset`` restricts them.

    Parameters
    ----------
    vertex_subset : cudf.Series or iterable container, optional
        Vertices whose out-degree should be reported.  All vertices
        when not set.

    Returns
    -------
    df : cudf.DataFrame
        Columns ``vertex`` (the vertex ids, identical to vertex_subset
        when given) and ``degree`` (the computed out-degree).

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, '0', '1')
    >>> df = G.out_degree([0,9,12])

    """
    # Delegate to the shared degree helper, restricted to outgoing edges.
    return self._degree(vertex_subset, direction=Direction.OUT)
def degree(self, vertex_subset=None):
    """
    Compute the total degree (incident in- and out-edges) of each
    vertex.  Results cover every vertex unless ``vertex_subset``
    restricts them.

    Parameters
    ----------
    vertex_subset : cudf.Series or iterable container, optional
        Vertices whose degree should be reported.  All vertices when
        not set.

    Returns
    -------
    df : cudf.DataFrame
        Columns 'vertex' (the vertex ids, identical to vertex_subset
        when given) and 'degree' (the computed degree).

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, '0', '1')
    >>> all_df = G.degree()
    >>> subset_df = G.degree([0,9,12])

    """
    # The shared helper's default direction already covers both
    # incoming and outgoing edges.
    return self._degree(vertex_subset)
# FIXME: vertex_subset could be a DataFrame for multi-column vertices
def degrees(self, vertex_subset=None):
    """
    Compute vertex in-degree and out-degree. By default, this method
    computes vertex degrees for the entire set of vertices. If
    vertex_subset is provided, this method optionally filters out all but
    those listed in vertex_subset.

    Parameters
    ----------
    vertex_subset : cudf.Series or iterable container, optional
        A container of vertices for displaying corresponding degree. If not
        set, degrees are computed for the entire set of vertices.

    Returns
    -------
    df : cudf.DataFrame
        GPU DataFrame of size N (the default) or the size of the given
        vertices (vertex_subset) containing the degrees. The ordering is
        relative to the adjacency list, or that given by the specified
        vertex_subset.

        df['vertex'] : cudf.Series
            The vertex IDs (will be identical to vertex_subset if
            specified).
        df['in_degree'] : cudf.Series
            The in-degree of the vertex.
        df['out_degree'] : cudf.Series
            The out-degree of the vertex.

    Examples
    --------
    >>> M = cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
    ...                   dtype=['int32', 'int32', 'float32'], header=None)
    >>> G = cugraph.Graph()
    >>> G.from_cudf_edgelist(M, '0', '1')
    >>> df = G.degrees([0,9,12])

    """
    (
        vertex_col,
        in_degree_col,
        out_degree_col,
    ) = graph_primtypes_wrapper._degrees(self)

    df = cudf.DataFrame()
    df["vertex"] = vertex_col
    df["in_degree"] = in_degree_col
    df["out_degree"] = out_degree_col

    if self.properties.renumbered:
        # Get the internal vertex IDs
        nodes = self.renumber_map.df_internal_to_external["id"]
    else:
        nodes = self.nodes()
    # If the vertex IDs are not contiguous, remove results for the
    # isolated vertices
    df = df[df["vertex"].isin(nodes.to_cupy())]

    if vertex_subset is not None:
        if not isinstance(vertex_subset, cudf.Series):
            vertex_subset = cudf.Series(vertex_subset)
        if self.properties.renumbered:
            # Filter using internal ids to match df["vertex"].
            vertex_subset = self.renumber_map.to_internal_vertex_id(
                vertex_subset
            )
        vertex_subset = vertex_subset.to_cupy()
        df = df[df["vertex"].isin(vertex_subset)]

    if self.properties.renumbered:
        df = self.renumber_map.unrenumber(df, "vertex")

    return df
def _degree(self, vertex_subset, direction=Direction.ALL):
    """
    Shared implementation behind degree()/in_degree()/out_degree().

    Returns a cudf.DataFrame with 'vertex' and 'degree' columns,
    optionally restricted to ``vertex_subset``.
    """
    vertex_col, degree_col = graph_primtypes_wrapper._degree(self, direction)
    df = cudf.DataFrame()
    df["vertex"] = vertex_col
    df["degree"] = degree_col

    if self.properties.renumbered:
        # Get the internal vertex IDs
        nodes = self.renumber_map.df_internal_to_external["id"]
    else:
        nodes = self.nodes()
    # If the vertex IDs are not contiguous, remove results for the
    # isolated vertices
    df = df[df["vertex"].isin(nodes.to_cupy())]

    if vertex_subset is not None:
        if not isinstance(vertex_subset, cudf.Series):
            vertex_subset = cudf.Series(vertex_subset)
        if self.properties.renumbered:
            # Filter using internal ids to match df["vertex"].
            vertex_subset = self.renumber_map.to_internal_vertex_id(
                vertex_subset
            )
        vertex_subset = vertex_subset.to_cupy()
        df = df[df["vertex"].isin(vertex_subset)]

    if self.properties.renumbered:
        df = self.renumber_map.unrenumber(df, "vertex")

    return df
def _make_plc_graph(
    self,
    value_col: Dict[str, cudf.DataFrame] = None,
    store_transposed: bool = False,
    renumber: bool = True,
):
    """
    Build the pylibcugraph (PLC) graph object from the stored COO or
    CSR representation.

    Parameters
    ----------
    value_col : cudf.DataFrame or dict[str, cudf.DataFrame]
        If a single dataframe is provided, this is assumed
        to contain the edge weight values.
        If a dictionary of dataframes is provided, then it is
        assumed to contain edge properties.
    store_transposed : bool (default=False)
        Whether to store the graph in a transposed
        format.  Required by some algorithms.
    renumber : bool (default=True)
        Whether to renumber the vertices of the graph.
        Required if inputted vertex ids are not of
        int32 or int64 type.
    """
    # Split the value container into the three canonical edge-property
    # columns expected by pylibcugraph.
    if value_col is None:
        weight_col, id_col, type_col = None, None, None
    elif isinstance(value_col, (cudf.DataFrame, cudf.Series)):
        weight_col, id_col, type_col = value_col, None, None
    elif isinstance(value_col, dict):
        weight_col = value_col[self.edgeWeightCol]
        id_col = value_col[self.edgeIdCol]
        type_col = value_col[self.edgeTypeCol]
    else:
        raise ValueError(f"Illegal value col {type(value_col)}")

    graph_props = GraphProperties(
        is_multigraph=self.properties.multi_edge,
        is_symmetric=not self.properties.directed,
    )

    if self.edgelist is not None:
        input_array_format = "COO"
        src_or_offset_array = self.edgelist.edgelist_df[simpleGraphImpl.srcCol]
        dst_or_index_array = self.edgelist.edgelist_df[simpleGraphImpl.dstCol]

    elif self.adjlist is not None:
        input_array_format = "CSR"
        src_or_offset_array = self.adjlist.offsets
        dst_or_index_array = self.adjlist.indices

    else:
        raise TypeError(
            "Edges need to be represented in either in COO or CSR format."
        )

    if weight_col is not None:
        weight_t = weight_col.dtype

        # PLC requires floating-point weights; promote integral types.
        if weight_t == "int32":
            weight_col = weight_col.astype("float32")
        if weight_t == "int64":
            weight_col = weight_col.astype("float64")

    if id_col is not None:
        if src_or_offset_array.dtype == "int64" and id_col.dtype != "int64":
            # BUGFIX: warn BEFORE casting; previously the f-string read
            # id_col.dtype after the astype("int64") call, so the
            # message always (wrongly) reported the edge id type as
            # int64.
            warnings.warn(
                f"Vertex type is int64 but edge id type is {id_col.dtype}"
                ", automatically casting edge id type to int64. "
                "This may cause extra memory usage. Consider passing"
                " a int64 list of edge ids instead."
            )
            id_col = id_col.astype("int64")

    self._plc_graph = SGGraph(
        resource_handle=ResourceHandle(),
        graph_properties=graph_props,
        src_or_offset_array=src_or_offset_array,
        dst_or_index_array=dst_or_index_array,
        weight_array=weight_col,
        edge_id_array=id_col,
        edge_type_array=type_col,
        store_transposed=store_transposed,
        renumber=renumber,
        do_expensive_check=True,
        input_array_format=input_array_format,
    )
def to_directed(self, DiG, store_transposed=False):
    """
    Return a directed representation of the graph Implementation.
    This function copies the internal structures and returns the
    directed view.

    Note: this will discard any edge ids or edge types but will
    preserve edge weights if present.
    """
    # Share (not copy) the internal structures with the new view.
    DiG.properties.renumbered = self.properties.renumbered
    DiG.renumber_map = self.renumber_map
    DiG.edgelist = self.edgelist
    DiG.adjlist = self.adjlist
    DiG.transposedadjlist = self.transposedadjlist

    # Keep only the weight column (ids/types are dropped, per the note).
    if simpleGraphImpl.edgeWeightCol in self.edgelist.edgelist_df:
        value_col = self.edgelist.edgelist_df[simpleGraphImpl.edgeWeightCol]
    else:
        value_col = None

    DiG._make_plc_graph(value_col, store_transposed)
def to_undirected(self, G, store_transposed=False):
    """
    Return an undirected copy of the graph.

    Note: This will discard any edge ids or edge types but will
    preserve edge weights if present.
    """
    G.properties.renumbered = self.properties.renumbered
    G.renumber_map = self.renumber_map
    if self.properties.directed is False:
        # Already undirected: share the internal structures.
        G.edgelist = self.edgelist
        G.adjlist = self.adjlist
        G.transposedadjlist = self.transposedadjlist
    else:
        # Symmetrize the directed edge list (weights included when
        # present) to obtain the undirected edge list.
        df = self.edgelist.edgelist_df
        if self.edgelist.weights:
            source_col, dest_col, value_col = symmetrize(
                df,
                simpleGraphImpl.srcCol,
                simpleGraphImpl.dstCol,
                simpleGraphImpl.edgeWeightCol,
            )
        else:
            source_col, dest_col = symmetrize(
                df, simpleGraphImpl.srcCol, simpleGraphImpl.dstCol
            )
            value_col = None
        G.edgelist = simpleGraphImpl.EdgeList(source_col, dest_col, value_col)

    # BUGFIX: read the weights from G's (possibly symmetrized) edge
    # list rather than from self's directed one; previously the weight
    # column could come from the unsymmetrized edge list and its length
    # would not match the edge list handed to the PLC graph.
    if simpleGraphImpl.edgeWeightCol in G.edgelist.edgelist_df:
        value_col = G.edgelist.edgelist_df[simpleGraphImpl.edgeWeightCol]
    else:
        value_col = None

    G._make_plc_graph(value_col, store_transposed)
def has_node(self, n):
    """Return True if vertex ``n`` is present in the graph."""
    # Compare against every known vertex; the double ``any`` also
    # collapses the multi-column (DataFrame) vertex case.
    matches = self.nodes() == n
    return matches.any().any()
def has_edge(self, u, v):
    """
    Returns True if the graph contains the edge (u,v).
    """
    if self.properties.renumbered:
        # Map (u, v) to internal ids via a throwaway dataframe.
        # NOTE(review): the "int" cast defaults to int64, which may not
        # match the renumber map's id dtype — confirm.
        tmp = cudf.DataFrame({simpleGraphImpl.srcCol: [u, v]})
        tmp = tmp.astype({simpleGraphImpl.srcCol: "int"})
        tmp = self.renumber_map.add_internal_vertex_id(
            tmp, "id", simpleGraphImpl.srcCol, preserve_order=True
        )

        u = tmp["id"][0]
        v = tmp["id"][1]

    df = self.edgelist.edgelist_df
    return (
        (df[simpleGraphImpl.srcCol] == u) & (df[simpleGraphImpl.dstCol] == v)
    ).any()
def has_self_loop(self):
    """Return True if any edge starts and ends at the same vertex."""
    # Compute once, then serve the cached answer on later calls.
    if self.properties.self_loop is None:
        elist = self.edgelist.edgelist_df
        loops = elist[simpleGraphImpl.srcCol] == elist[simpleGraphImpl.dstCol]
        self.properties.self_loop = bool(loops.any())
    return self.properties.self_loop
def edges(self):
    """
    Return every edge as a cudf.DataFrame of sources and destinations,
    without weights.  Use view_edge_list() to see weights as well.
    """
    # Select just the vertex columns from the full edge-list view.
    return self.view_edge_list()[self.vertex_columns]
def nodes(self):
"""
Returns all the nodes in the graph as a cudf.Series, in order of appearance
in the edgelist (source column first, then destination column).
If multi columns vertices, return a cudf.DataFrame.
"""
if self.edgelist is not None:
df = self.edgelist.edgelist_df
if self.properties.renumbered:
df = self.renumber_map.df_internal_to_external.drop(columns="id")
if len(df.columns) > 1:
return df
else:
return df[df.columns[0]]
else:
return (
cudf.concat(
[df[simpleGraphImpl.srcCol], df[simpleGraphImpl.dstCol]]
)
.drop_duplicates()
.reset_index(drop=True)
)
if self.adjlist is not None:
return cudf.Series(np.arange(0, self.number_of_nodes()))
def neighbors(self, n):
if self.edgelist is None:
raise RuntimeError("Graph has no Edgelist.")
if self.properties.renumbered:
node = self.renumber_map.to_internal_vertex_id(cudf.Series([n]))
if len(node) == 0:
return cudf.Series(dtype="int")
n = node[0]
df = self.edgelist.edgelist_df
neighbors = df[df[simpleGraphImpl.srcCol] == n][
simpleGraphImpl.dstCol
].reset_index(drop=True)
if self.properties.renumbered:
# FIXME: Multi-column vertices
return self.renumber_map.from_internal_vertex_id(neighbors)["0"]
else:
return neighbors
def vertex_column_size(self):
if self.properties.renumbered:
return self.renumber_map.vertex_column_size()
else:
return 1
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_implementation/npartiteGraph.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .simpleGraph import simpleGraphImpl
import cudf
class npartiteGraphImpl(simpleGraphImpl):
def __init__(self, properties):
super(npartiteGraphImpl, self).__init__(properties)
self.properties.bipartite = properties.bipartite
# API may change in future
def __from_edgelist(
self,
input_df,
source="source",
destination="destination",
edge_attr=None,
renumber=True,
):
self._simpleGraphImpl__from_edgelist(
input_df,
source=source,
destination=destination,
edge_attr=edge_attr,
renumber=renumber,
)
def sets(self):
"""
Returns the bipartite set of nodes. This solely relies on the user's
call of add_nodes_from with the bipartite parameter. This does not
parse the graph to compute bipartite sets. If bipartite argument was
not provided during add_nodes_from(), it raise an exception that the
graph is not bipartite.
"""
# TO DO: Call coloring algorithm
set_names = [i for i in self._nodes.keys() if i != "all_nodes"]
if self.properties.bipartite:
top = self._nodes[set_names[0]]
if len(set_names) == 2:
bottom = self._nodes[set_names[1]]
else:
bottom = cudf.Series(
set(self.nodes().values_host) - set(top.values_host)
)
return top, bottom
else:
return {k: self._nodes[k] for k in set_names}
# API may change in future
def add_nodes_from(self, nodes, bipartite=None, multipartite=None):
"""
Add nodes information to the Graph.
Parameters
----------
nodes : list or cudf.Series
The nodes of the graph to be stored. If bipartite and multipartite
arguments are not passed, the nodes are considered to be a list of
all the nodes present in the Graph.
bipartite : str, optional (default=None)
Sets the Graph as bipartite. The nodes are stored as a set of nodes
of the partition named as bipartite argument.
multipartite : str, optional (default=None)
Sets the Graph as multipartite. The nodes are stored as a set of
nodes of the partition named as multipartite argument.
"""
if bipartite is None and multipartite is None:
raise Exception("Partition not provided")
else:
set_names = [i for i in self._nodes.keys() if i != "all_nodes"]
if multipartite is not None:
if self.properties.bipartite:
raise Exception(
"The Graph is bipartite. " "Use bipartite option instead."
)
elif bipartite is not None:
if not self.properties.bipartite:
raise Exception(
"The Graph is set as npartite. "
"Use multipartite option instead."
)
multipartite = bipartite
if multipartite not in set_names and len(set_names) == 2:
raise Exception(
"The Graph is set as bipartite and "
"already has two partitions initialized."
)
self._nodes[multipartite] = cudf.Series(nodes)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_implementation/simpleDistributedGraph.py | # Copyright (c) 2021-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
from typing import Union
import warnings
import cudf
import cupy as cp
import dask
import dask_cudf
from dask import delayed
from dask.distributed import wait, default_client
import numpy as np
from pylibcugraph import (
MGGraph,
ResourceHandle,
GraphProperties,
get_two_hop_neighbors as pylibcugraph_get_two_hop_neighbors,
select_random_vertices as pylibcugraph_select_random_vertices,
)
from cugraph.structure import graph_primtypes_wrapper
from cugraph.structure.graph_primtypes_wrapper import Direction
from cugraph.structure.number_map import NumberMap
from cugraph.structure.symmetrize import symmetrize
from cugraph.dask.common.part_utils import (
get_persisted_df_worker_map,
get_length_of_parts,
persist_dask_df_equal_parts_per_worker,
)
from cugraph.dask import get_n_workers
import cugraph.dask.comms.comms as Comms
class simpleDistributedGraphImpl:
edgeWeightCol = "value"
edgeIdCol = "edge_id"
edgeTypeCol = "edge_type"
class EdgeList:
def __init__(self, ddf):
self.edgelist_df = ddf
self.weights = False
# FIXME: Edge Attribute not handled
# class AdjList:
# Not Supported
# class transposedAdjList:
# Not Supported
class Properties:
def __init__(self, properties):
self.multi_edge = getattr(properties, "multi_edge", False)
self.directed = properties.directed
self.renumber = False
self.store_transposed = False
self.self_loop = None
self.isolated_vertices = None
self.node_count = None
self.edge_count = None
self.weighted = False
def __init__(self, properties):
# Structure
self.edgelist = None
self.renumber_map = None
self.properties = simpleDistributedGraphImpl.Properties(properties)
self.source_columns = None
self.destination_columns = None
self.weight_column = None
self.vertex_columns = None
def _make_plc_graph(
sID,
edata_x,
graph_props,
src_col_name,
dst_col_name,
store_transposed,
num_edges,
):
weights = None
edge_ids = None
edge_types = None
if simpleDistributedGraphImpl.edgeWeightCol in edata_x[0]:
weights = _get_column_from_ls_dfs(
edata_x, simpleDistributedGraphImpl.edgeWeightCol
)
if weights.dtype == "int32":
weights = weights.astype("float32")
elif weights.dtype == "int64":
weights = weights.astype("float64")
if simpleDistributedGraphImpl.edgeIdCol in edata_x[0]:
edge_ids = _get_column_from_ls_dfs(
edata_x, simpleDistributedGraphImpl.edgeIdCol
)
if edata_x[0][src_col_name].dtype == "int64" and edge_ids.dtype != "int64":
edge_ids = edge_ids.astype("int64")
warnings.warn(
f"Vertex type is int64 but edge id type is {edge_ids.dtype}"
", automatically casting edge id type to int64. "
"This may cause extra memory usage. Consider passing"
" a int64 list of edge ids instead."
)
if simpleDistributedGraphImpl.edgeTypeCol in edata_x[0]:
edge_types = _get_column_from_ls_dfs(
edata_x, simpleDistributedGraphImpl.edgeTypeCol
)
return MGGraph(
resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
graph_properties=graph_props,
src_array=_get_column_from_ls_dfs(edata_x, src_col_name),
dst_array=_get_column_from_ls_dfs(edata_x, dst_col_name),
weight_array=weights,
edge_id_array=edge_ids,
edge_type_array=edge_types,
store_transposed=store_transposed,
num_edges=num_edges,
do_expensive_check=False,
)
# Functions
def __from_edgelist(
self,
input_ddf,
source="source",
destination="destination",
edge_attr=None,
weight=None,
edge_id=None,
edge_type=None,
renumber=True,
store_transposed=False,
legacy_renum_only=False,
):
if not isinstance(input_ddf, dask_cudf.DataFrame):
raise TypeError("input should be a dask_cudf dataFrame")
if renumber is False:
if type(source) is list and type(destination) is list:
raise ValueError("set renumber to True for multi column ids")
elif input_ddf[source].dtype not in [np.int32, np.int64] or input_ddf[
destination
].dtype not in [np.int32, np.int64]:
raise ValueError("set renumber to True for non integer columns ids")
s_col = source
d_col = destination
if not isinstance(s_col, list):
s_col = [s_col]
if not isinstance(d_col, list):
d_col = [d_col]
if not (
set(s_col).issubset(set(input_ddf.columns))
and set(d_col).issubset(set(input_ddf.columns))
):
raise ValueError(
"source column names and/or destination column "
"names not found in input. Recheck the source "
"and destination parameters"
)
ddf_columns = s_col + d_col
self.vertex_columns = ddf_columns.copy()
_client = default_client()
workers = _client.scheduler_info()["workers"]
# Repartition to 2 partitions per GPU for memory efficient process
input_ddf = input_ddf.repartition(npartitions=len(workers) * 2)
input_ddf = input_ddf.map_partitions(lambda df: df.copy())
# The dataframe will be symmetrized iff the graph is undirected
# otherwise, the inital dataframe will be returned
if edge_attr is not None:
if weight is not None or edge_id is not None or edge_type is not None:
raise ValueError(
"If specifying edge_attr, cannot specify weight/edge_id/edge_type"
)
if isinstance(edge_attr, str):
weight = edge_attr
edge_attr = [weight]
if not (set(edge_attr).issubset(set(input_ddf.columns))):
raise ValueError(
"edge_attr column name not found in input."
"Recheck the edge_attr parameter"
)
self.properties.weighted = True
if len(edge_attr) == 1:
input_ddf = input_ddf.rename(columns={edge_attr[0]: self.edgeWeightCol})
value_col_names = [self.edgeWeightCol]
elif len(edge_attr) == 3:
weight_col, id_col, type_col = edge_attr
input_ddf = input_ddf[ddf_columns + [weight_col, id_col, type_col]]
input_ddf.columns = ddf_columns + [
self.edgeWeightCol,
self.edgeIdCol,
self.edgeTypeCol,
]
value_col_names = [self.edgeWeightCol, self.edgeIdCol, self.edgeTypeCol]
else:
raise ValueError("Only 1 or 3 values may be provided" "for edge_attr")
# The symmetrize step may add additional edges with unknown
# ids and types for an undirected graph. Therefore, only
# directed graphs may be used with ids and types.
# FIXME: Drop the check in symmetrize.py as it is redundant
if len(edge_attr) == 3:
if not self.properties.directed:
raise ValueError(
"User-provided edge ids and/or edge "
"types are not permitted for an "
"undirected graph."
)
else:
value_col_names = {}
if weight is not None:
value_col_names[weight] = self.edgeWeightCol
self.properties.weighted = True
if edge_id is not None:
value_col_names[edge_id] = self.edgeIdCol
if edge_type is not None:
value_col_names[edge_type] = self.edgeTypeCol
if len(value_col_names.keys()) > 0:
input_ddf = input_ddf.rename(columns=value_col_names)
value_col_names = list(value_col_names.values())
ddf_columns += value_col_names
input_ddf = input_ddf[ddf_columns]
if len(value_col_names) == 0:
source_col, dest_col = symmetrize(
input_ddf,
source,
destination,
multi=self.properties.multi_edge,
symmetrize=not self.properties.directed,
)
value_col = None
else:
source_col, dest_col, value_col = symmetrize(
input_ddf,
source,
destination,
value_col_names,
multi=self.properties.multi_edge,
symmetrize=not self.properties.directed,
)
if isinstance(source_col, dask_cudf.Series):
# Create a dask_cudf dataframe from the cudf series obtained
# from symmetrization
input_ddf = source_col.to_frame()
input_ddf = input_ddf.rename(columns={source_col.name: source})
input_ddf[destination] = dest_col
else:
# Multi column dask_cudf dataframe
input_ddf = dask_cudf.concat([source_col, dest_col], axis=1)
if value_col is not None:
for vc in value_col_names:
input_ddf[vc] = value_col[vc]
self.input_df = input_ddf
#
# Keep all of the original parameters so we can lazily
# evaluate this function
#
# FIXME: Edge Attribute not handled
# FIXME: the parameter below is no longer used for unrenumbering
self.properties.renumber = renumber
self.source_columns = source
self.destination_columns = destination
self.weight_column = weight
# If renumbering is not enabled, this function will only create
# the edgelist_df and not do any renumbering.
# C++ renumbering is enabled by default for algorithms that
# support it (but only called if renumbering is on)
self.compute_renumber_edge_list(
transposed=store_transposed, legacy_renum_only=legacy_renum_only
)
if renumber is False:
self.properties.renumbered = False
src_col_name = self.source_columns
dst_col_name = self.destination_columns
else:
# If 'renumber' is set to 'True', an extra renumbering (python)
# occurs if there are non-integer or multi-columns vertices
self.properties.renumbered = self.renumber_map.is_renumbered
src_col_name = self.renumber_map.renumbered_src_col_name
dst_col_name = self.renumber_map.renumbered_dst_col_name
ddf = self.edgelist.edgelist_df
graph_props = GraphProperties(
is_multigraph=self.properties.multi_edge,
is_symmetric=not self.properties.directed,
)
ddf = ddf.repartition(npartitions=len(workers) * 2)
persisted_keys_d = persist_dask_df_equal_parts_per_worker(
ddf, _client, return_type="dict"
)
del ddf
length_of_parts = get_length_of_parts(persisted_keys_d, _client)
num_edges = sum(
[item for sublist in length_of_parts.values() for item in sublist]
)
delayed_tasks_d = {
w: delayed(simpleDistributedGraphImpl._make_plc_graph)(
Comms.get_session_id(),
edata,
graph_props,
src_col_name,
dst_col_name,
store_transposed,
num_edges,
)
for w, edata in persisted_keys_d.items()
}
self._plc_graph = {
w: _client.compute(
delayed_task, workers=w, allow_other_workers=False, pure=False
)
for w, delayed_task in delayed_tasks_d.items()
}
wait(list(self._plc_graph.values()))
del persisted_keys_d
del delayed_tasks_d
_client.run(gc.collect)
@property
def renumbered(self):
# This property is now used to determine if a dataframe was renumbered
# by checking the column name. Only the renumbered dataframes will have
# their column names renamed to 'renumbered_src' and 'renumbered_dst'
renumbered_vertex_col_names = ["renumbered_src", "renumbered_dst"]
if self.edgelist is not None:
if self.edgelist.edgelist_df is not None and (
set(renumbered_vertex_col_names).issubset(
set(self.edgelist.edgelist_df.columns)
)
):
return True
return False
def view_edge_list(self):
"""
FIXME: Should this also return the edge ids and types?
Display the edge list. Compute it if needed.
NOTE: If the graph is of type Graph() then the displayed undirected
edges are the same as displayed by networkx Graph(), but the direction
could be different i.e. an edge displayed by cugraph as (src, dst)
could be displayed as (dst, src) by networkx.
cugraph.Graph stores symmetrized edgelist internally. For displaying
undirected edgelist for a Graph the upper trianglar matrix of the
symmetrized edgelist is returned.
networkx.Graph renumbers the input and stores the upper triangle of
this renumbered input. Since the internal renumbering of networx and
cugraph is different, the upper triangular matrix of networkx
renumbered input may not be the same as cugraph's upper trianglar
matrix of the symmetrized edgelist. Hence the displayed source and
destination pairs in both will represent the same edge but node values
could be swapped.
Returns
-------
df : dask_cudf.DataFrame
This dask_cudf.DataFrame wraps source, destination and weight
df[src] : dask_cudf.Series
contains the source index for each edge
df[dst] : dask_cudf.Series
contains the destination index for each edge
df[weight] : dask_cudf.Series
Column is only present for weighted Graph,
then containing the weight value for each edge
"""
if self.edgelist is None:
raise RuntimeError("Graph has no Edgelist.")
edgelist_df = self.input_df
is_string_dtype = False
is_multi_column = False
wgtCol = simpleDistributedGraphImpl.edgeWeightCol
if not self.properties.directed:
srcCol = self.source_columns
dstCol = self.destination_columns
if self.renumber_map.unrenumbered_id_type == "object":
# FIXME: Use the renumbered vertices instead and then un-renumber.
# This operation can be expensive.
is_string_dtype = True
edgelist_df = self.edgelist.edgelist_df
srcCol = self.renumber_map.renumbered_src_col_name
dstCol = self.renumber_map.renumbered_dst_col_name
if isinstance(srcCol, list):
srcCol = self.renumber_map.renumbered_src_col_name
dstCol = self.renumber_map.renumbered_dst_col_name
edgelist_df = self.edgelist.edgelist_df
# unrenumber before extracting the upper triangular part
if len(self.source_columns) == 1:
edgelist_df = self.renumber_map.unrenumber(edgelist_df, srcCol)
edgelist_df = self.renumber_map.unrenumber(edgelist_df, dstCol)
else:
is_multi_column = True
edgelist_df[srcCol], edgelist_df[dstCol] = edgelist_df[
[srcCol, dstCol]
].min(axis=1), edgelist_df[[srcCol, dstCol]].max(axis=1)
edgelist_df = edgelist_df.groupby(by=[srcCol, dstCol]).sum().reset_index()
if wgtCol in edgelist_df.columns:
# FIXME: This breaks if there are are multi edges as those will
# be dropped during the symmetrization step and the original 'weight'
# will be halved.
edgelist_df[wgtCol] /= 2
if is_string_dtype or is_multi_column:
# unrenumber the vertices
edgelist_df = self.renumber_map.unrenumber(edgelist_df, srcCol)
edgelist_df = self.renumber_map.unrenumber(edgelist_df, dstCol)
if self.properties.renumbered:
edgelist_df = edgelist_df.rename(
columns=self.renumber_map.internal_to_external_col_names
)
# If there is no 'wgt' column, nothing will happen
edgelist_df = edgelist_df.rename(columns={wgtCol: self.weight_column})
self.properties.edge_count = len(edgelist_df)
return edgelist_df
def delete_edge_list(self):
"""
Delete the edge list.
"""
self.edgelist = None
def clear(self):
"""
Empty this graph.
"""
self.edgelist = None
def number_of_vertices(self):
"""
Get the number of nodes in the graph.
"""
if self.properties.node_count is None:
self.properties.node_count = len(self.nodes())
return self.properties.node_count
def number_of_nodes(self):
"""
An alias of number_of_vertices().
"""
return self.number_of_vertices()
def number_of_edges(self, directed_edges=False):
"""
Get the number of edges in the graph.
"""
if directed_edges and self.edgelist is not None:
return len(self.edgelist.edgelist_df)
if self.properties.edge_count is None:
if self.edgelist is not None:
self.view_edge_list()
else:
raise RuntimeError("Graph is Empty")
return self.properties.edge_count
def in_degree(self, vertex_subset=None):
"""
Compute vertex in-degree. Vertex in-degree is the number of edges
pointing into the vertex. By default, this method computes vertex
degrees for the entire set of vertices. If vertex_subset is provided,
this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf or dask_cudf object, iterable container,
opt. (default=None)
A container of vertices for displaying corresponding in-degree.
If not set, degrees are computed for the entire set of vertices.
Returns
-------
df : dask_cudf.DataFrame
Distributed GPU DataFrame of size N (the default) or the size of
the given vertices (vertex_subset) containing the in_degree.
The ordering is relative to the adjacency list, or that given by
the specified vertex_subset.
df[vertex] : dask_cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df[degree] : dask_cudf.Series
The computed in-degree of the corresponding vertex.
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> df = G.in_degree([0,9,12])
"""
src_col_name = self.source_columns
dst_col_name = self.destination_columns
# select only the vertex columns
if not isinstance(src_col_name, list) and not isinstance(dst_col_name, list):
vertex_col_names = [src_col_name] + [dst_col_name]
df = self.input_df[vertex_col_names]
df = df.drop(columns=src_col_name)
nodes = self.nodes()
if isinstance(nodes, dask_cudf.Series):
nodes = nodes.to_frame()
if not isinstance(dst_col_name, list):
df = df.rename(columns={dst_col_name: "vertex"})
dst_col_name = "vertex"
vertex_col_names = df.columns
nodes.columns = vertex_col_names
df["degree"] = 1
# FIXME: leverage the C++ in_degree for optimal performance
in_degree = (
df.groupby(dst_col_name)
.degree.count(split_out=df.npartitions)
.reset_index()
)
# Add vertices with zero in_degree
in_degree = nodes.merge(in_degree, how="outer").fillna(0)
# Convert vertex_subset to dataframe.
if vertex_subset is not None:
if not isinstance(vertex_subset, (dask_cudf.DataFrame, cudf.DataFrame)):
if isinstance(vertex_subset, dask_cudf.Series):
vertex_subset = vertex_subset.to_frame()
else:
df = cudf.DataFrame()
if isinstance(vertex_subset, (cudf.Series, list)):
df["vertex"] = vertex_subset
vertex_subset = df
if isinstance(vertex_subset, (dask_cudf.DataFrame, cudf.DataFrame)):
vertex_subset.columns = vertex_col_names
in_degree = in_degree.merge(vertex_subset, how="inner")
else:
raise TypeError(
f"Expected type are: cudf, dask_cudf objects, "
f"iterable container, got "
f"{type(vertex_subset)}"
)
return in_degree
def out_degree(self, vertex_subset=None):
"""
Compute vertex out-degree. Vertex out-degree is the number of edges
pointing out from the vertex. By default, this method computes vertex
degrees for the entire set of vertices. If vertex_subset is provided,
this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf or dask_cudf object, iterable container,
opt. (default=None)
A container of vertices for displaying corresponding out-degree.
If not set, degrees are computed for the entire set of vertices.
Returns
-------
df : dask_cudf.DataFrame
Distributed GPU DataFrame of size N (the default) or the size of
the given vertices (vertex_subset) containing the out_degree.
The ordering is relative to the adjacency list, or that given by
the specified vertex_subset.
df[vertex] : dask_cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df[degree] : dask_cudf.Series
The computed out-degree of the corresponding vertex.
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> df = G.out_degree([0,9,12])
"""
src_col_name = self.source_columns
dst_col_name = self.destination_columns
# select only the vertex columns
if not isinstance(src_col_name, list) and not isinstance(dst_col_name, list):
vertex_col_names = [src_col_name] + [dst_col_name]
df = self.input_df[vertex_col_names]
df = df.drop(columns=dst_col_name)
nodes = self.nodes()
if isinstance(nodes, dask_cudf.Series):
nodes = nodes.to_frame()
if not isinstance(src_col_name, list):
df = df.rename(columns={src_col_name: "vertex"})
src_col_name = "vertex"
vertex_col_names = df.columns
nodes.columns = vertex_col_names
df["degree"] = 1
# leverage the C++ out_degree for optimal performance
out_degree = (
df.groupby(src_col_name)
.degree.count(split_out=df.npartitions)
.reset_index()
)
# Add vertices with zero out_degree
out_degree = nodes.merge(out_degree, how="outer").fillna(0)
# Convert vertex_subset to dataframe.
if vertex_subset is not None:
if not isinstance(vertex_subset, (dask_cudf.DataFrame, cudf.DataFrame)):
if isinstance(vertex_subset, dask_cudf.Series):
vertex_subset = vertex_subset.to_frame()
else:
df = cudf.DataFrame()
if isinstance(vertex_subset, (cudf.Series, list)):
df["vertex"] = vertex_subset
vertex_subset = df
if isinstance(vertex_subset, (dask_cudf.DataFrame, cudf.DataFrame)):
vertex_subset.columns = vertex_col_names
out_degree = out_degree.merge(vertex_subset, how="inner")
else:
raise TypeError(
f"Expected type are: cudf, dask_cudf objects, "
f"iterable container, got "
f"{type(vertex_subset)}"
)
return out_degree
def degree(self, vertex_subset=None):
"""
Compute vertex degree, which is the total number of edges incident
to a vertex (both in and out edges). By default, this method computes
degrees for the entire set of vertices. If vertex_subset is provided,
then this method optionally filters out all but those listed in
vertex_subset.
Parameters
----------
vertex_subset : cudf or dask_cudf object, iterable container,
opt. (default=None)
a container of vertices for displaying corresponding degree. If not
set, degrees are computed for the entire set of vertices.
Returns
-------
df : dask_cudf.DataFrame
Distributed GPU DataFrame of size N (the default) or the size of
the given vertices (vertex_subset) containing the degree.
The ordering is relative to the adjacency list, or that given by
the specified vertex_subset.
df['vertex'] : dask_cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df['degree'] : dask_cudf.Series
The computed degree of the corresponding vertex.
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> all_df = G.degree()
>>> subset_df = G.degree([0,9,12])
"""
vertex_in_degree = self.in_degree(vertex_subset)
vertex_out_degree = self.out_degree(vertex_subset)
# FIXME: leverage the C++ degree for optimal performance
vertex_degree = dask_cudf.concat([vertex_in_degree, vertex_out_degree])
vertex_degree = vertex_degree.groupby(["vertex"], as_index=False).sum(
split_out=self.input_df.npartitions
)
return vertex_degree
# FIXME: vertex_subset could be a DataFrame for multi-column vertices
def degrees(self, vertex_subset=None):
"""
Compute vertex in-degree and out-degree. By default, this method
computes vertex degrees for the entire set of vertices. If
vertex_subset is provided, this method optionally filters out all but
those listed in vertex_subset.
Parameters
----------
vertex_subset : cudf.Series or iterable container, optional
A container of vertices for displaying corresponding degree. If not
set, degrees are computed for the entire set of vertices.
Returns
-------
df : dask_cudf.DataFrame
Distributed GPU DataFrame of size N (the default) or the size of
the given vertices (vertex_subset) containing the degrees.
The ordering is relative to the adjacency list, or that given by
the specified vertex_subset.
df['vertex'] : dask_cudf.Series
The vertex IDs (will be identical to vertex_subset if
specified).
df['in_degree'] : dask_cudf.Series
The in-degree of the vertex.
df['out_degree'] : dask_cudf.Series
The out-degree of the vertex.
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> df = G.degrees([0,9,12])
"""
raise NotImplementedError("Not supported for distributed graph")
def _degree(self, vertex_subset, direction=Direction.ALL):
vertex_col, degree_col = graph_primtypes_wrapper._mg_degree(self, direction)
df = cudf.DataFrame()
df["vertex"] = vertex_col
df["degree"] = degree_col
if self.renumbered is True:
df = self.renumber_map.unrenumber(df, "vertex")
if vertex_subset is not None:
df = df[df["vertex"].isin(vertex_subset)]
return df
def get_two_hop_neighbors(self, start_vertices=None):
"""
Compute vertex pairs that are two hops apart. The resulting pairs are
sorted before returning.
Returns
-------
df : cudf.DataFrame
df[first] : cudf.Series
the first vertex id of a pair, if an external vertex id
is defined by only one column
df[second] : cudf.Series
the second vertex id of a pair, if an external vertex id
is defined by only one column
"""
_client = default_client()
def _call_plc_two_hop_neighbors(sID, mg_graph_x, start_vertices):
return pylibcugraph_get_two_hop_neighbors(
resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
graph=mg_graph_x,
start_vertices=start_vertices,
do_expensive_check=False,
)
if isinstance(start_vertices, int):
start_vertices = [start_vertices]
if isinstance(start_vertices, list):
start_vertices = cudf.Series(start_vertices)
if start_vertices is not None:
if self.renumbered:
start_vertices = self.renumber_map.to_internal_vertex_id(start_vertices)
start_vertices_type = self.edgelist.edgelist_df.dtypes[0]
else:
start_vertices_type = self.input_df.dtypes[0]
if not isinstance(start_vertices, (dask_cudf.Series)):
start_vertices = dask_cudf.from_cudf(
start_vertices,
npartitions=min(self._npartitions, len(start_vertices)),
)
start_vertices = start_vertices.astype(start_vertices_type)
n_workers = get_n_workers()
start_vertices = start_vertices.repartition(npartitions=n_workers)
start_vertices = persist_dask_df_equal_parts_per_worker(
start_vertices, _client
)
start_vertices = get_persisted_df_worker_map(start_vertices, _client)
result = [
_client.submit(
_call_plc_two_hop_neighbors,
Comms.get_session_id(),
self._plc_graph[w],
start_vertices[w][0],
workers=[w],
allow_other_workers=False,
)
for w in start_vertices.keys()
]
else:
result = [
_client.submit(
_call_plc_two_hop_neighbors,
Comms.get_session_id(),
self._plc_graph[w],
start_vertices,
workers=[w],
allow_other_workers=False,
)
for w in Comms.get_workers()
]
wait(result)
def convert_to_cudf(cp_arrays):
"""
Creates a cudf DataFrame from cupy arrays from pylibcugraph wrapper
"""
first, second = cp_arrays
df = cudf.DataFrame()
df["first"] = first
df["second"] = second
return df
cudf_result = [
_client.submit(convert_to_cudf, cp_arrays) for cp_arrays in result
]
wait(cudf_result)
ddf = dask_cudf.from_delayed(cudf_result).persist()
wait(ddf)
# Wait until the inactive futures are released
wait([(r.release(), c_r.release()) for r, c_r in zip(result, cudf_result)])
if self.properties.renumbered:
ddf = self.renumber_map.unrenumber(ddf, "first")
ddf = self.renumber_map.unrenumber(ddf, "second")
return ddf
    def select_random_vertices(
        self, random_state: int = None, num_vertices: int = None
    ) -> Union[dask_cudf.Series, dask_cudf.DataFrame]:
        """
        Select random vertices from the graph

        Parameters
        ----------
        random_state : int , optional(default=None)
            Random state to use when generating samples. Optional argument,
            defaults to a hash of process id, time, and hostname.

        num_vertices : int, optional(default=None)
            Number of vertices to sample. If None, all vertices will be selected

        Returns
        -------
        return random vertices from the graph as a dask object
        """
        _client = default_client()

        def convert_to_cudf(cp_arrays: cp.ndarray) -> cudf.Series:
            """
            Creates a cudf Series from cupy arrays
            """
            vertices = cudf.Series(cp_arrays)

            return vertices

        def _call_plc_select_random_vertices(
            mg_graph_x, sID: bytes, random_state: int, num_vertices: int
        ) -> cudf.Series:
            # Runs on a worker: sample from this worker's local graph
            # partition via the pylibcugraph wrapper.
            cp_arrays = pylibcugraph_select_random_vertices(
                graph=mg_graph_x,
                resource_handle=ResourceHandle(Comms.get_handle(sID).getHandle()),
                random_state=random_state,
                num_vertices=num_vertices,
            )
            return convert_to_cudf(cp_arrays)

        def _mg_call_plc_select_random_vertices(
            input_graph,
            client: dask.distributed.client.Client,
            sID: bytes,
            random_state: int,
            num_vertices: int,
        ) -> dask_cudf.Series:
            # One task per worker, pinned to that worker's graph partition.
            # Each task gets a distinct seed derived from (random_state, i)
            # so workers do not all draw identical samples; pure=False stops
            # dask from deduplicating tasks with equal arguments.
            result = [
                client.submit(
                    _call_plc_select_random_vertices,
                    input_graph._plc_graph[w],
                    sID,
                    hash((random_state, i)),
                    num_vertices,
                    workers=[w],
                    allow_other_workers=False,
                    pure=False,
                )
                for i, w in enumerate(Comms.get_workers())
            ]

            ddf = dask_cudf.from_delayed(result, verify_meta=False).persist()
            wait(ddf)
            # Release the per-worker futures once the persisted frame is ready
            wait([r.release() for r in result])
            return ddf

        ddf = _mg_call_plc_select_random_vertices(
            self,
            _client,
            Comms.get_session_id(),
            random_state,
            num_vertices,
        )

        if self.properties.renumbered:
            # Map internal ids back to the caller's original vertex ids; when
            # the original ids were a single column, return a Series.
            vertices = ddf.rename("vertex").to_frame()
            vertices = self.renumber_map.unrenumber(vertices, "vertex")
            if len(vertices.columns) == 1:
                vertices = vertices["vertex"]
        else:
            vertices = ddf

        return vertices
def to_directed(self, G):
"""
Return a directed representation of the graph.
Returns
-------
G : Graph(directed=True)
A directed graph with the same nodes, and each edge (u,v,weights)
replaced by two directed edges (u,v,weights) and (v,u,weights).
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph()
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> DiG = G.to_directed()
"""
# TODO: Add support
raise NotImplementedError("Not supported for distributed graph")
def to_undirected(self, G):
"""
Return an undirected copy of the graph.
Returns
-------
G : Graph
A undirected graph with the same nodes, and each directed edge
(u,v,weights) replaced by an undirected edge (u,v,weights).
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> DiG = cugraph.Graph(directed=True)
>>> DiG.dask_from_cudf_edgelist(M, '0', '1')
>>> G = DiG.to_undirected()
"""
# TODO: Add support
raise NotImplementedError("Not supported for distributed graph")
def has_node(self, n):
"""
Returns True if the graph contains the node(s) n.
Examples
--------
>>> M = dask_cudf.read_csv(datasets_path / 'karate.csv', delimiter=' ',
... dtype=['int32', 'int32', 'float32'], header=None)
>>> G = cugraph.Graph(directed=True)
>>> G.from_dask_cudf_edgelist(M, '0', '1')
>>> valid_source = cudf.Series([5])
>>> invalid_source = cudf.Series([55])
>>> is_valid_vertex = G.has_node(valid_source)
>>> assert is_valid_vertex is True
>>> is_valid_vertex = G.has_node(invalid_source)
>>> assert is_valid_vertex is False
"""
# Convert input to dataframes so that it can be compared through merge
if not isinstance(n, (dask_cudf.DataFrame, cudf.DataFrame)):
if isinstance(n, dask_cudf.Series):
n = n.to_frame()
else:
df = cudf.DataFrame()
if not isinstance(n, (cudf.DataFrame, cudf.Series)):
n = [n]
if isinstance(n, (cudf.Series, list)):
df["vertex"] = n
n = df
if isinstance(n, (dask_cudf.DataFrame, cudf.DataFrame)):
nodes = self.nodes()
if not isinstance(self.nodes(), (dask_cudf.DataFrame, cudf.DataFrame)):
nodes = nodes.to_frame()
nodes.columns = n.columns
valid_vertex = nodes.merge(n, how="inner")
return len(valid_vertex) == len(n)
    def has_edge(self, u, v):
        """
        Returns True if the graph contains the edge (u,v).
        """
        # TODO: Verify Correctness
        if self.renumbered:
            # Translate (u, v) from external ids to the graph's internal ids
            # before scanning the renumbered edge list.
            src_col_name = self.renumber_map.renumbered_src_col_name

            tmp = cudf.DataFrame({src_col_name: [u, v]})
            tmp = tmp.astype({src_col_name: "int"})
            tmp = self.add_internal_vertex_id(
                tmp, "id", src_col_name, preserve_order=True
            )

            # preserve_order=True above guarantees row 0 is u and row 1 is v
            u = tmp["id"][0]
            v = tmp["id"][1]

        df = self.edgelist.edgelist_df
        # any() on a dask_cudf boolean column is lazy; compute() materializes
        # the scalar result
        return ((df["src"] == u) & (df["dst"] == v)).any().compute()
def edges(self):
"""
Returns all the edges in the graph as a cudf.DataFrame containing
sources and destinations. It does not return the edge weights.
For viewing edges with weights use view_edge_list()
"""
return self.view_edge_list()[self.vertex_columns]
    def nodes(self):
        """
        Returns all nodes in the graph as a dask_cudf.Series.
        If multi columns vertices, return a dask_cudf.DataFrame.

        If the edgelist was renumbered, this call returns the internal
        nodes in the graph. To get the original nodes, convert the result to
        a dataframe and do 'renumber_map.unrenumber' or 'G.unrenumber'
        """
        if self.edgelist is not None:
            if self.renumbered:
                # FIXME: This relies on current implementation
                #        of NumberMap, should not really expose
                #        this, perhaps add a method to NumberMap
                df = self.renumber_map.implementation.ddf.drop(columns="global_id")

                # Multi-column vertices -> DataFrame; single column -> Series
                if len(df.columns) > 1:
                    return df
                else:
                    return df[df.columns[0]]

            else:
                # Not renumbered: derive the vertex set directly from the
                # input edge list (union of source and destination columns)
                df = self.input_df
                return dask_cudf.concat(
                    [df[self.source_columns], df[self.destination_columns]]
                ).drop_duplicates()
        else:
            raise RuntimeError("Graph is Empty")
def neighbors(self, n):
if self.edgelist is None:
raise RuntimeError("Graph has no Edgelist.")
# FIXME: Add renumbering of node n
ddf = self.edgelist.edgelist_df
return ddf[ddf["src"] == n]["dst"].reset_index(drop=True)
    def compute_renumber_edge_list(self, transposed=False, legacy_renum_only=False):
        """
        Compute a renumbered edge list

        This function works in the MNMG pipeline and will transform
        the input dask_cudf.DataFrame into a renumbered edge list
        in the prescribed direction.

        This function will be called by the algorithms to ensure
        that the graph is renumbered properly. The graph object will
        cache the most recent renumbering attempt. For benchmarking
        purposes, this function can be called prior to calling a
        graph algorithm so we can measure the cost of computing
        the renumbering separately from the cost of executing the
        algorithm.

        When creating a CSR-like structure, set transposed to False.
        When creating a CSC-like structure, set transposed to True.

        Parameters
        ----------
        transposed : (optional) bool
            If True, renumber with the intent to make a CSC-like
            structure. If False, renumber with the intent to make
            a CSR-like structure. Defaults to False.

        legacy_renum_only : (optional) bool
            if True, The C++ renumbering will not be triggered.
            This parameter is added for new algos following the
            C/Pylibcugraph path

            This parameter is deprecated and will be removed.
        """
        if legacy_renum_only:
            warning_msg = (
                "The parameter 'legacy_renum_only' is deprecated and will be removed."
            )
            warnings.warn(warning_msg, DeprecationWarning)

        if not self.properties.renumber:
            # Renumbering disabled: use the input edge list as-is
            self.edgelist = self.EdgeList(self.input_df)
            self.renumber_map = None
        else:
            if self.edgelist is not None:
                # A cached renumbering already exists. For undirected graphs
                # the orientation is irrelevant, and for directed graphs the
                # cache is valid when built with the same transpose setting;
                # in both cases reuse it.
                if self.properties.directed is False:
                    return

                if self.properties.store_transposed == transposed:
                    return

                # The cache was built for the other orientation: discard it
                del self.edgelist

            (renumbered_ddf, number_map,) = NumberMap.renumber_and_segment(
                self.input_df,
                self.source_columns,
                self.destination_columns,
                store_transposed=transposed,
                legacy_renum_only=legacy_renum_only,
            )

            self.edgelist = self.EdgeList(renumbered_ddf)
            self.renumber_map = number_map
            self.properties.store_transposed = transposed
def vertex_column_size(self):
if self.renumbered:
return self.renumber_map.vertex_column_size()
else:
return 1
    @property
    def _npartitions(self) -> int:
        # One pylibcugraph graph handle is kept per dask worker, so the
        # number of entries equals the graph's partition count.
        return len(self._plc_graph)
def _get_column_from_ls_dfs(lst_df, col_name):
    """
    Concatenate column ``col_name`` from every DataFrame in ``lst_df`` and
    drop that column from each input DataFrame (the inputs are mutated).

    Parameters
    ----------
    lst_df : list of cudf.DataFrame
        Non-empty list of frames, each containing ``col_name``.
    col_name : str
        Name of the column to extract.

    Returns
    -------
    cudf.Series
        The concatenated column (empty when all inputs are empty).
    """
    # Generator avoids materializing a throwaway list just to sum lengths
    len_df = sum(len(df) for df in lst_df)
    if len_df == 0:
        # All inputs empty: return the (empty) column unchanged
        return lst_df[0][col_name]

    output_col = cudf.concat([df[col_name] for df in lst_df], ignore_index=True)
    for df in lst_df:
        df.drop(columns=[col_name], inplace=True)
    # Collect once after all drops; a full GC pass per DataFrame is costly
    gc.collect()
    return output_col
| 0 |
rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure | rapidsai_public_repos/cugraph/python/cugraph/cugraph/structure/graph_implementation/__init__.py | # Copyright (c) 2021-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .simpleGraph import simpleGraphImpl
from .simpleDistributedGraph import simpleDistributedGraphImpl
from .npartiteGraph import npartiteGraphImpl
| 0 |
rapidsai_public_repos/cugraph/python | rapidsai_public_repos/cugraph/python/cugraph-service/README.md | # cugraph_service
## Description
[RAPIDS](https://rapids.ai) cugraph-service provides an RPC interface to a remote [RAPIDS cuGraph](https://github.com/rapidsai/cugraph) session, allowing users to perform GPU accelerated graph analytics from a remote process. cugraph-service uses cuGraph, cuDF, and other libraries on the server to execute graph data prep and analysis on server-side GPUs. Multiple clients can connect to the server, allowing different users and processes to access large graph data that may not otherwise be possible using the client resources.
## <div align="center"><img src="img/cugraph_service_pict.png" width="400px"/></div>
-----
### Quick start
1. Install the cugraph-service conda packages (installing the server package also installs the client):
```
conda install -c rapidsai-nightly -c rapidsai -c conda-forge -c nvidia cugraph-service-server
```
1. Run the server (use --help to see more options)
- To run on a single-GPU:
```
cugraph-service-server
```
- To run on multiple GPUs:
```
cugraph-service-server --start-local-cuda-cluster
```
1. Use the client in your application:
```
>>> from cugraph_service_client import CugraphServiceClient
>>> client = CugraphServiceClient()
>>> # check connection to the server, uptime is in seconds
>>> client.uptime()
28
>>> # create a graph from a CSV on the server
>>> graph_id = client.create_graph()
>>> client.get_graph_ids()
[1]
>>> client.load_csv_as_edge_data("karate.csv", dtypes=["int32", "int32", "float32"], vertex_col_names=["src", "dst"], header=0, graph_id=graph_id)
>>> # check the graph info
>>> client.get_graph_info(graph_id=graph_id)
{'num_vertex_properties': 0, 'num_edge_properties': 1, 'is_multi_gpu': 0, 'num_edges': 156, 'num_vertices_from_vertex_data': 0, 'num_vertices': 34}
>>> # run an algo
>>> client.uniform_neighbor_sample(start_list=[0,12], fanout_vals=[2], graph_id=graph_id)
UniformNeighborSampleResult(sources=[0, 0, 12, 12], destinations=[1, 21, 0, 3], indices=[1.0, 1.0, 1.0, 1.0])
>>> # cleanup the graph on the server
>>> client.delete_graph(graph_id)
>>> client.get_graph_ids()
[]
```
### Debugging
#### UCX-Py related variables:
`UCX_TLS` - set the transports to use, in priority order. Example:
```
UCX_TLS=tcp,cuda_copy,cuda_ipc
```
`UCX_TCP_CM_REUSEADDR` - reuse addresses. This can be used to avoid "resource in use" errors during starting/restarting the service repeatedly.
```
UCX_TCP_CM_REUSEADDR=y
```
`UCX_LOG_LEVEL` - set the level for which UCX will output messages to the console. The example below will only output "ERROR" or higher. Set to "DEBUG" to see debug and higher messages.
```
UCX_LOG_LEVEL=ERROR
```
#### UCX performance checks:
Because cugraph-service uses UCX-Py for direct-to-client GPU data transfers when specified, it can be helpful to understand the various UCX performance checks available to ensure cugraph-service is transferring results as efficiently as the system is capable of.
```
ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 &
ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 localhost
```
```
ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 &
ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 1000000 -s 1000000000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 1000000 -s 1000000000 localhost
```
### Building from source
Build and install the client first, then the server. This is necessary because the server depends on shared modules provided by the client.
```
$> cd cugraph_repo/python/cugraph_service/client
$> python setup.py install
$> cd ../server
$> python setup.py install
```
------
## <div align="left"><img src="img/rapids_logo.png" width="265px"/></div> Open GPU Data Science
The RAPIDS suite of open source software libraries aims to enable execution of end-to-end data science and analytics pipelines entirely on GPUs. It relies on NVIDIA® CUDA® primitives for low-level compute optimization while exposing that GPU parallelism and high-bandwidth memory speed through user-friendly Python interfaces.
| 0 |
rapidsai_public_repos/cugraph/python | rapidsai_public_repos/cugraph/python/cugraph-service/pytest.ini | # Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[pytest]
addopts = --benchmark-warmup=off
--benchmark-max-time=0
--benchmark-min-rounds=1
--benchmark-columns="min, max, mean, rounds"
## for use with rapids-pytest-benchmark plugin
#--benchmark-gpu-disable
## for use with pytest-cov plugin
#--cov=cugraph
#--cov-report term-missing:skip-covered
markers = sg: single-GPU
mg: multi-GPU
snmg: single-node multi-GPU
mnmg: multi-node multi-GPU
local: local cugraph
remote: cugraph-service
start_list_small: use a "small" start list length for sampling algos
start_list_large: use a "large" start list length for sampling algos
fanout_list_small: use a "small" fanout list length for sampling algos
fanout_list_large: use a "large" fanout list length for sampling algos
python_files = bench_*
test_*
python_functions = bench_*
test_*
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/client1_script.py | # Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to be used to simulate a cugraph_service client.
"""
import random
import time
from pathlib import Path
from cugraph_service_client import CugraphServiceClient
_data_dir = (Path(__file__).parent) / "data"

# Minimal copy of the karate test-data metadata (the same entry exists in
# tests/data.py) so this client-simulation script can run standalone.
edgelist_csv_data = {
    "karate": {
        "csv_file_name": (_data_dir / "karate.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "float32"],
        "num_edges": 156,
    },
}
# Connect to a locally-running cugraph-service server and load the karate
# edge list into the default graph.
client = CugraphServiceClient()

test_data = edgelist_csv_data["karate"]
client.load_csv_as_edge_data(
    test_data["csv_file_name"],
    dtypes=test_data["dtypes"],
    vertex_col_names=["0", "1"],
    type_name="",
)

# Give other simulated clients time to start before hammering the server
time.sleep(10)

# Random id used only to tag this client's (commented-out) debug output
n = int(random.random() * 1000)
# print(f"---> starting {n}", flush=True)
# Repeatedly extract subgraphs to exercise the server under load. The loop
# index is unused, so name it "_"; extracted_gid is kept for the optional
# delete/debug lines below.
for _ in range(1000000):
    extracted_gid = client.extract_subgraph(allow_multi_edges=False)
    # client.delete_graph(extracted_gid)
    # print(f"---> {n}: extracted {extracted_gid}", flush=True)
# print(f"---> done {n}", flush=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
_data_dir = (Path(__file__).parent) / "data"

# Edge-list CSVs used to build plain (non-property) graphs in the tests.
edgelist_csv_data = {
    "karate": {
        "csv_file_name": (_data_dir / "karate.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "float32"],
        "num_edges": 156,
    },
}

# Vertex/edge property CSVs used to populate PropertyGraphs in the tests.
# "vert_col_name" names the single vertex id column for vertex data;
# "vert_col_names" names the (source, destination) columns for edge data.
property_csv_data = {
    "merchants": {
        "csv_file_name": (_data_dir / "merchants.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "int32", "float32", "int32", "string"],
        "vert_col_name": "merchant_id",
    },
    "users": {
        "csv_file_name": (_data_dir / "users.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "int32"],
        "vert_col_name": "user_id",
    },
    "transactions": {
        "csv_file_name": (_data_dir / "transactions.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "float32", "float32", "int32", "string"],
        "vert_col_names": ("user_id", "merchant_id"),
    },
    "relationships": {
        "csv_file_name": (_data_dir / "relationships.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "int32"],
        "vert_col_names": ("user_id_1", "user_id_2"),
    },
    "referrals": {
        "csv_file_name": (_data_dir / "referrals.csv").absolute().as_posix(),
        "dtypes": ["int32", "int32", "int32", "int32"],
        "vert_col_names": ("user_id_1", "user_id_2"),
    },
}
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/test_remote_graph.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import random
import pytest
import pandas as pd
import numpy as np
import cupy
import cudf
import cugraph
from cugraph.experimental import PropertyGraph
from cugraph_service_client import RemoteGraph
from . import data
# FIXME: Remove this once these pass in the CI environment.
# Module-level skip: prevents every test below from being collected/run.
pytest.skip(
    reason="FIXME: many of these tests fail in CI and are currently run "
    "manually only in dev environments.",
    allow_module_level=True,
)
###############################################################################
# fixtures
# The fixtures used in these tests are defined here and in conftest.py
@pytest.fixture(scope="function")
def client_with_property_csvs_loaded(client):
    """
    Loads each of the vertex and edge property CSVs into the default graph on
    the server. The dataset name doubles as the vertex/edge type name.
    """
    vertex_sources = ["merchants", "users"]
    edge_sources = ["transactions", "relationships", "referrals"]

    for type_name in vertex_sources:
        meta = data.property_csv_data[type_name]
        client.load_csv_as_vertex_data(
            meta["csv_file_name"],
            dtypes=meta["dtypes"],
            vertex_col_name=meta["vert_col_name"],
            header=0,
            type_name=type_name,
        )

    for type_name in edge_sources:
        meta = data.property_csv_data[type_name]
        client.load_csv_as_edge_data(
            meta["csv_file_name"],
            dtypes=meta["dtypes"],
            vertex_col_names=meta["vert_col_names"],
            header=0,
            type_name=type_name,
        )

    assert client.get_graph_ids() == [0]
    return client
@pytest.fixture(scope="function")
def pG_with_property_csvs_loaded():
    """
    Loads each of the vertex and edge property CSVs into a
    property graph. The dataset name doubles as the vertex/edge type name.
    """
    pG = PropertyGraph()

    for type_name in ("merchants", "users"):
        meta = data.property_csv_data[type_name]
        vertex_df = cudf.read_csv(
            meta["csv_file_name"], dtype=meta["dtypes"], header=0, delimiter=" "
        )
        pG.add_vertex_data(
            vertex_df,
            vertex_col_name=meta["vert_col_name"],
            type_name=type_name,
        )

    for type_name in ("transactions", "relationships", "referrals"):
        meta = data.property_csv_data[type_name]
        edge_df = cudf.read_csv(
            meta["csv_file_name"], dtype=meta["dtypes"], header=0, delimiter=" "
        )
        pG.add_edge_data(
            edge_df,
            vertex_col_names=meta["vert_col_names"],
            type_name=type_name,
        )

    return pG
def test_graph_info(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """RemoteGraph.graph_info must mirror the local PropertyGraph's counts."""
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    expected = {
        "num_edges": pG.get_num_edges(),
        "num_edge_properties": len(pG.edge_property_names),
        "num_vertices": pG.get_num_vertices(),
        "num_vertex_properties": len(pG.vertex_property_names),
        "num_vertices_from_vertex_data": pG.get_num_vertices(include_edge_data=False),
        "is_multi_gpu": False,
    }

    actual = rpG.graph_info
    assert set(actual.keys()) == set(expected.keys())
    for key, value in expected.items():
        assert actual[key] == value
def test_edges(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    # FIXME update this when edges() method issue is resolved.
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    expected = pG.get_edge_data(
        columns=[pG.src_col_name, pG.dst_col_name, pG.type_col_name]
    )
    actual = rpG.edges()

    # id/src/dst columns compare directly
    column_pairs = [
        (pG.edge_id_col_name, rpG.edge_id_col_name),
        (pG.src_col_name, rpG.src_col_name),
        (pG.dst_col_name, rpG.dst_col_name),
    ]
    for pg_col, rpg_col in column_pairs:
        assert (expected[pg_col] == actual[rpg_col]).all()

    # the type column is compared as strings to sidestep categorical dtypes
    assert (
        expected[pG.type_col_name].astype("string")
        == actual[rpG.type_col_name].astype("string")
    ).all()
def test_property_type_names(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """Remote property-name and type listings must match the local graph's."""
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    for attr in (
        "vertex_property_names",
        "edge_property_names",
        "vertex_types",
        "edge_types",
    ):
        assert getattr(rpG, attr) == getattr(pG, attr)
def test_num_elements(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """Vertex and edge counts must match, both overall and per type."""
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    assert rpG.get_num_vertices() == pG.get_num_vertices()
    assert rpG.get_num_vertices(include_edge_data=False) == pG.get_num_vertices(
        include_edge_data=False
    )

    for vertex_type in pG.vertex_types:
        assert rpG.get_num_vertices(type=vertex_type) == pG.get_num_vertices(
            type=vertex_type
        )
        assert rpG.get_num_vertices(
            type=vertex_type, include_edge_data=False
        ) == pG.get_num_vertices(type=vertex_type, include_edge_data=False)

    assert rpG.get_num_edges() == pG.get_num_edges()
    for edge_type in pG.edge_types:
        assert rpG.get_num_edges(type=edge_type) == pG.get_num_edges(type=edge_type)
def test_get_vertex_data(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # All vertex data. fillna(0) on the local frame makes it comparable with
    # the remote result (presumably the service fills NA with 0 — the FIXME
    # notes na handling should be exposed); type columns are compared as
    # strings to sidestep categorical dtypes.
    vd = rpG.get_vertex_data()
    vd[rpG.type_col_name] = vd[rpG.type_col_name].astype("string")
    expected_vd = pG.get_vertex_data().fillna(0)  # FIXME expose na handling
    expected_vd[pG.type_col_name] = expected_vd[pG.type_col_name].astype("string")
    for col in expected_vd.columns:
        assert (expected_vd[col] == vd[col]).all()

    # Three random 3-vertex-id subsets
    for _ in range(3):
        vertex_ids = random.sample(pG.vertices_ids().values_host.tolist(), 3)
        vd = rpG.get_vertex_data(vertex_ids=vertex_ids)
        vd[rpG.type_col_name] = vd[rpG.type_col_name].astype("string")
        expected_vd = pG.get_vertex_data(vertex_ids=vertex_ids).fillna(
            0
        )  # FIXME expose na handling
        expected_vd[pG.type_col_name] = expected_vd[pG.type_col_name].astype("string")
        for col in expected_vd.columns:
            assert (expected_vd[col] == vd[col]).all()

    # Filtering by vertex type
    vertex_type_list = [["merchants", "users"], ["merchants"]]
    for vertex_types in vertex_type_list:
        vd = rpG.get_vertex_data(types=vertex_types)
        vd[rpG.type_col_name] = vd[rpG.type_col_name].astype("string")
        expected_vd = pG.get_vertex_data(types=vertex_types).fillna(
            0
        )  # FIXME expose na handling
        expected_vd[pG.type_col_name] = expected_vd[pG.type_col_name].astype("string")
        for col in expected_vd.columns:
            assert (expected_vd[col] == vd[col]).all()

    # Filtering by type combined with column selection
    vd = rpG.get_vertex_data(types=["users"], columns=["vertical"])
    vd[rpG.type_col_name] = vd[rpG.type_col_name].astype("string")
    expected_vd = pG.get_vertex_data(types=["users"], columns=["vertical"]).fillna(
        0
    )  # FIXME expose na handling
    expected_vd[pG.type_col_name] = expected_vd[pG.type_col_name].astype("string")
    for col in expected_vd.columns:
        assert (expected_vd[col] == vd[col]).all()
def test_get_edge_data(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # All edge data. fillna(0) on the local frame makes it comparable with
    # the remote result (presumably the service fills NA with 0 — the FIXME
    # notes na handling should be exposed); type columns are compared as
    # strings to sidestep categorical dtypes.
    ed = rpG.get_edge_data()
    ed[rpG.type_col_name] = ed[rpG.type_col_name].astype("string")
    expected_ed = pG.get_edge_data().fillna(0)  # FIXME expose na handling
    expected_ed[pG.type_col_name] = expected_ed[pG.type_col_name].astype("string")
    for col in expected_ed.columns:
        assert (expected_ed[col] == ed[col]).all()

    # Three random 3-edge-id subsets
    for _ in range(3):
        edge_ids = random.sample(
            pG.get_edge_data()[pG.edge_id_col_name].values_host.tolist(), 3
        )
        ed = rpG.get_edge_data(edge_ids=edge_ids)
        ed[rpG.type_col_name] = ed[rpG.type_col_name].astype("string")
        expected_ed = pG.get_edge_data(edge_ids=edge_ids).fillna(
            0
        )  # FIXME expose na handling
        expected_ed[pG.type_col_name] = expected_ed[pG.type_col_name].astype("string")
        for col in expected_ed.columns:
            assert (expected_ed[col] == ed[col]).all()

    # Filtering by edge type
    for edge_types in [["transactions", "relationships"], ["referrals"]]:
        ed = rpG.get_edge_data(types=edge_types)
        ed[rpG.type_col_name] = ed[rpG.type_col_name].astype("string")
        expected_ed = pG.get_edge_data(types=edge_types).fillna(
            0
        )  # FIXME expose na handling
        expected_ed[pG.type_col_name] = expected_ed[pG.type_col_name].astype("string")
        for col in expected_ed.columns:
            assert (expected_ed[col] == ed[col]).all()

    # Filtering by type combined with column selection
    ed = rpG.get_edge_data(types=["referrals"], columns=["stars", "merchant_id"])
    ed[rpG.type_col_name] = ed[rpG.type_col_name].astype("string")
    expected_ed = pG.get_edge_data(
        types=["referrals"], columns=["stars", "merchant_id"]
    ).fillna(
        0
    )  # FIXME expose na handling
    expected_ed[pG.type_col_name] = expected_ed[pG.type_col_name].astype("string")
    for col in expected_ed.columns:
        assert (expected_ed[col] == ed[col]).all()
@pytest.mark.skip(reason="not yet implemented")
def test_add_vertex_data(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """Placeholder: adding vertex data through the client is not yet tested."""
    raise NotImplementedError()
@pytest.mark.skip(reason="not yet implemented")
def test_add_edge_data(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """Placeholder: adding edge data through the client is not yet tested."""
    raise NotImplementedError()
def test_get_vertices(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """The remote vertex id set must equal the local vertex id set."""
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    remote_ids = rpG.get_vertices().to_cupy().tolist()
    local_ids = pG.get_vertices().to_cupy().tolist()
    assert set(remote_ids) == set(local_ids)
@pytest.mark.skip(reason="not yet implemented")
def test_get_vertices_with_selection(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """Placeholder: vertex selection through the client is not yet tested."""
    raise NotImplementedError()
# Each create_using entry pairs the local graph object with the string form
# the remote API accepts; each selection entry pairs a "multigraph only"
# flag with the edge-filter expression.
@pytest.mark.parametrize(
    "create_using",
    [
        (None, None),
        (cugraph.Graph(), "Graph"),
        (cugraph.MultiGraph(), "MultiGraph"),
        (cugraph.Graph(directed=True), "Graph(directed=True)"),
        (cugraph.MultiGraph(directed=True), "MultiGraph(directed=True)"),
    ],
)
@pytest.mark.parametrize(
    "selection",
    [
        (True, None),
        (False, '_TYPE_=="transactions"'),
        (True, '(_TYPE_=="transactions") | (_TYPE_=="relationships")'),
    ],
)
@pytest.mark.parametrize("renumber", [False, True])
def test_extract_subgraph(
    client_with_property_csvs_loaded,
    pG_with_property_csvs_loaded,
    create_using,
    selection,
    renumber,
):
    mg_only, selection = selection
    # Skip combinations whose selection only makes sense on a multigraph
    if mg_only and create_using[0] is not None and not create_using[0].is_multigraph():
        pytest.skip()

    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # Extract the same subgraph locally and remotely
    sg = pG.extract_subgraph(
        create_using=create_using[0],
        selection=None if selection is None else pG.select_edges(selection),
        renumber_graph=renumber,
    )
    remote_sg = rpG.extract_subgraph(
        create_using=create_using[1], selection=selection, renumber_graph=renumber
    )

    assert remote_sg.get_num_vertices() == sg.number_of_vertices()

    # The vertex ids appearing in the local subgraph's edge list (mapped
    # back to original ids when renumbering was requested) must match the
    # remote subgraph's vertex set.
    expected_vertex_ids = (
        cudf.concat([sg.edgelist.edgelist_df["src"], sg.edgelist.edgelist_df["dst"]])
        .unique()
        .sort_values()
    )
    if renumber:
        expected_vertex_ids = sg.unrenumber(
            cudf.DataFrame({"v": expected_vertex_ids}), "v"
        )["v"]

    assert set(remote_sg.vertices_ids().to_cupy().tolist()) == set(
        expected_vertex_ids.to_cupy().tolist()
    )

    expected_edgelist = sg.edgelist.edgelist_df
    if renumber:
        expected_edgelist = sg.unrenumber(expected_edgelist, "src")
        expected_edgelist = sg.unrenumber(expected_edgelist, "dst")

    # Compare edge lists in a canonical (src, dst) sort order
    expected_edgelist = expected_edgelist.sort_values(["src", "dst"])
    edge_data = remote_sg.get_edge_data().sort_values(
        [remote_sg.src_col_name, remote_sg.dst_col_name]
    )

    assert (
        expected_edgelist["src"].to_cupy().tolist()
        == edge_data[remote_sg.src_col_name].to_cupy().tolist()
    )
    assert (
        expected_edgelist["dst"].to_cupy().tolist()
        == edge_data[remote_sg.dst_col_name].to_cupy().tolist()
    )
def test_backend_pandas(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    # With backend="pandas" the remote calls return pandas DataFrames, which
    # are compared column-by-column against the local cudf results
    # (values_host bridges GPU -> host memory).
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # edges()
    rpg_edges = rpG.edges(backend="pandas")
    pg_edges = pG.get_edge_data(
        columns=[pG.src_col_name, pG.dst_col_name, pG.type_col_name]
    )
    assert isinstance(rpg_edges, pd.DataFrame)
    assert (
        rpg_edges[rpG.src_col_name].tolist()
        == pg_edges[pG.src_col_name].values_host.tolist()
    )
    assert (
        rpg_edges[rpG.dst_col_name].tolist()
        == pg_edges[pG.dst_col_name].values_host.tolist()
    )
    assert (
        rpg_edges[rpG.type_col_name].tolist()
        == pg_edges[pG.type_col_name].values_host.tolist()
    )
    assert (
        rpg_edges[rpG.edge_id_col_name].tolist()
        == pg_edges[pG.edge_id_col_name].values_host.tolist()
    )

    # get_vertex_data() — local NA filled with 0 to match the remote result
    rpg_vertex_data = rpG.get_vertex_data(backend="pandas")
    pg_vertex_data = pG.get_vertex_data().fillna(0)
    assert isinstance(rpg_vertex_data, pd.DataFrame)

    assert sorted(list(rpg_vertex_data.columns)) == sorted(list(pg_vertex_data.columns))
    for col in rpg_vertex_data.columns:
        assert rpg_vertex_data[col].tolist() == pg_vertex_data[col].values_host.tolist()

    # get_edge_data() — same comparison for edge properties
    rpg_edge_data = rpG.get_edge_data(backend="pandas")
    pg_edge_data = pG.get_edge_data().fillna(0)
    assert isinstance(rpg_edge_data, pd.DataFrame)

    assert sorted(list(rpg_edge_data.columns)) == sorted(list(pg_edge_data.columns))
    for col in rpg_edge_data.columns:
        assert rpg_edge_data[col].tolist() == pg_edge_data[col].values_host.tolist()
def test_backend_cupy(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """
    Compare RemoteGraph results using the "cupy" backend against the local
    PropertyGraph equivalents for edges(), get_vertex_data(), and
    get_edge_data().
    """
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # edges()
    rpg_edges = rpG.edges(backend="cupy")
    pg_edges = pG.get_edge_data(
        columns=[pG.src_col_name, pG.dst_col_name, pG.type_col_name]
    )
    for out_tensor in rpg_edges:
        assert isinstance(out_tensor, cupy.ndarray)
    # Array order in the cupy result is [edge_id, src, dst, type], as
    # established by the per-index comparisons below.
    assert rpg_edges[1].get().tolist() == pg_edges[pG.src_col_name].values_host.tolist()
    assert rpg_edges[2].get().tolist() == pg_edges[pG.dst_col_name].values_host.tolist()
    assert (
        rpg_edges[0].get().tolist()
        == pg_edges[pG.edge_id_col_name].values_host.tolist()
    )
    # Remote edge types come back as categorical codes; map the local type
    # strings through the remote categorical dtype before comparing.
    rpg_types = rpg_edges[3].get().tolist()
    pg_types = [
        rpG._edge_categorical_dtype[t] for t in pg_edges[pG.type_col_name].values_host
    ]
    assert rpg_types == pg_types

    # get_vertex_data()
    cols_of_interest = [
        "merchant_location",
        "merchant_size",
        "merchant_sales",
        "merchant_num_employees",
    ]
    rpg_vertex_data = rpG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest, backend="cupy"
    )
    pg_vertex_data = pG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_vertex_data:
        assert isinstance(out_tensor, cupy.ndarray)
    assert len(rpg_vertex_data) == len(pg_vertex_data.columns)
    # Property columns start at index 2 — presumably after vertex-id and type
    # arrays; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert (
            rpg_vertex_data[i + 2].tolist() == pg_vertex_data[col].values_host.tolist()
        )

    # get_edge_data()
    cols_of_interest = ["time", "volume", "card_num"]
    rpg_edge_data = rpG.get_edge_data(
        types=["transactions"], columns=cols_of_interest, backend="cupy"
    )
    pg_edge_data = pG.get_edge_data(
        types=["transactions"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_edge_data:
        assert isinstance(out_tensor, cupy.ndarray)
    assert len(rpg_edge_data) == len(pg_edge_data.columns)
    # Property columns start at index 4 — presumably after id/src/dst/type
    # arrays; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert rpg_edge_data[i + 4].tolist() == pg_edge_data[col].values_host.tolist()
def test_backend_numpy(client_with_property_csvs_loaded, pG_with_property_csvs_loaded):
    """
    Compare RemoteGraph results using the "numpy" backend against the local
    PropertyGraph equivalents for edges(), get_vertex_data(), and
    get_edge_data().
    """
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # edges()
    rpg_edges = rpG.edges(backend="numpy")
    pg_edges = pG.get_edge_data(
        columns=[pG.src_col_name, pG.dst_col_name, pG.type_col_name]
    )
    for out_tensor in rpg_edges:
        assert isinstance(out_tensor, np.ndarray)
    # Array order in the numpy result is [edge_id, src, dst, type], as
    # established by the per-index comparisons below.
    assert rpg_edges[1].tolist() == pg_edges[pG.src_col_name].values_host.tolist()
    assert rpg_edges[2].tolist() == pg_edges[pG.dst_col_name].values_host.tolist()
    assert rpg_edges[0].tolist() == pg_edges[pG.edge_id_col_name].values_host.tolist()
    # Remote edge types come back as categorical codes; map the local type
    # strings through the remote categorical dtype before comparing.
    rpg_types = rpg_edges[3].tolist()
    pg_types = [
        rpG._edge_categorical_dtype[t] for t in pg_edges[pG.type_col_name].values_host
    ]
    assert rpg_types == pg_types

    # get_vertex_data()
    cols_of_interest = [
        "merchant_location",
        "merchant_size",
        "merchant_sales",
        "merchant_num_employees",
    ]
    rpg_vertex_data = rpG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest, backend="numpy"
    )
    pg_vertex_data = pG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_vertex_data:
        assert isinstance(out_tensor, np.ndarray)
    assert len(rpg_vertex_data) == len(pg_vertex_data.columns)
    # Property columns start at index 2 — presumably after vertex-id and type
    # arrays; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert (
            rpg_vertex_data[i + 2].tolist() == pg_vertex_data[col].values_host.tolist()
        )

    # get_edge_data()
    cols_of_interest = ["time", "volume", "card_num"]
    rpg_edge_data = rpG.get_edge_data(
        types=["transactions"], columns=cols_of_interest, backend="numpy"
    )
    pg_edge_data = pG.get_edge_data(
        types=["transactions"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_edge_data:
        assert isinstance(out_tensor, np.ndarray)
    assert len(rpg_edge_data) == len(pg_edge_data.columns)
    # Property columns start at index 4 — presumably after id/src/dst/type
    # arrays; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert rpg_edge_data[i + 4].tolist() == pg_edge_data[col].values_host.tolist()
# Import torch if available so the torch-backend tests below are skipped
# (via the skipif marker) rather than erroring in environments without torch.
try:
    torch = importlib.import_module("torch")
except ModuleNotFoundError:
    torch = None
@pytest.mark.skipif(torch is None, reason="torch not available")
@pytest.mark.parametrize("torch_backend", ["torch", "torch:0", "torch:cuda"])
def test_backend_torch(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded, torch_backend
):
    """
    Compare RemoteGraph results using the torch backends (CPU, explicit
    device 0, and "cuda") against the local PropertyGraph equivalents for
    edges(), get_vertex_data(), and get_edge_data().
    """
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # edges()
    rpg_edges = rpG.edges(backend=torch_backend)
    pg_edges = pG.get_edge_data(
        columns=[pG.src_col_name, pG.dst_col_name, pG.type_col_name]
    )
    for out_tensor in rpg_edges:
        assert isinstance(out_tensor, torch.Tensor)
    # Tensor order in the result is [edge_id, src, dst, type], as established
    # by the per-index comparisons below.
    assert rpg_edges[1].tolist() == pg_edges[pG.src_col_name].values_host.tolist()
    assert rpg_edges[2].tolist() == pg_edges[pG.dst_col_name].values_host.tolist()
    assert rpg_edges[0].tolist() == pg_edges[pG.edge_id_col_name].values_host.tolist()
    # Remote edge types come back as categorical codes; map the local type
    # strings through the remote categorical dtype before comparing.
    rpg_types = rpg_edges[3].tolist()
    pg_types = [
        rpG._edge_categorical_dtype[t] for t in pg_edges[pG.type_col_name].values_host
    ]
    assert rpg_types == pg_types

    # get_vertex_data()
    cols_of_interest = [
        "merchant_location",
        "merchant_size",
        "merchant_sales",
        "merchant_num_employees",
    ]
    rpg_vertex_data = rpG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest, backend=torch_backend
    )
    pg_vertex_data = pG.get_vertex_data(
        types=["merchants"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_vertex_data:
        assert isinstance(out_tensor, torch.Tensor)
    assert len(rpg_vertex_data) == len(pg_vertex_data.columns)
    # Property columns start at index 2 — presumably after vertex-id and type
    # tensors; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert (
            rpg_vertex_data[i + 2].tolist() == pg_vertex_data[col].values_host.tolist()
        )

    # get_edge_data()
    cols_of_interest = ["time", "volume", "card_num"]
    rpg_edge_data = rpG.get_edge_data(
        types=["transactions"], columns=cols_of_interest, backend=torch_backend
    )
    pg_edge_data = pG.get_edge_data(
        types=["transactions"], columns=cols_of_interest
    ).fillna(0)
    for out_tensor in rpg_edge_data:
        assert isinstance(out_tensor, torch.Tensor)
    assert len(rpg_edge_data) == len(pg_edge_data.columns)
    # Property columns start at index 4 — presumably after id/src/dst/type
    # tensors; TODO confirm against the RemoteGraph backend layout.
    for i, col in enumerate(cols_of_interest):
        assert rpg_edge_data[i + 4].tolist() == pg_edge_data[col].values_host.tolist()
def test_remote_graph_neighbor_sample(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """
    Extract equivalent subgraphs locally and remotely, run
    uniform_neighbor_sample on both via the dispatcher, and verify the
    results match.
    """
    # FIXME: consider moving the call dispatcher into cugraph-service-client or
    # cugraph proper. Import it here for now to allow tests to run in an
    # environment without cugraph-pyg.
    from cugraph_pyg.loader.dispatch import call_cugraph_algorithm

    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    # Select only the "transactions" edges for both subgraphs. The remote API
    # takes the selection string and graph-type string directly.
    selection = '_TYPE_=="transactions"'
    sg = pG.extract_subgraph(
        create_using=cugraph.MultiGraph(directed=True),
        selection=pG.select_edges(selection),
        renumber_graph=False,
    )
    remote_sg = rpG.extract_subgraph(
        create_using="MultiGraph(directed=True)",
        selection=selection,
        renumber_graph=False,
    )

    res_local = call_cugraph_algorithm(
        "uniform_neighbor_sample",
        sg,
        [89021, 89216],
        [10],
        with_replacement=True,
        backend="cudf",
    )
    res_remote = call_cugraph_algorithm(
        "uniform_neighbor_sample",
        remote_sg,
        [89021, 89216],
        [10],
        with_replacement=True,
        backend="cudf",
    )

    # Both runs must produce identical sampling output column-for-column.
    assert (res_local["sources"] == res_remote["sources"]).all()
    assert (res_local["destinations"] == res_remote["destinations"]).all()
    assert (res_local["indices"] == res_remote["indices"]).all()
def test_remote_graph_neighbor_sample_implicit_subgraph(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """
    Run uniform_neighbor_sample passing the RemoteGraph itself (no explicit
    subgraph extraction on the remote side) and verify the results match a
    local run on an explicitly extracted subgraph.
    """
    # FIXME: consider moving the call dispatcher into cugraph-service-client or
    # cugraph proper. Import it here for now to allow tests to run in an
    # environment without cugraph-pyg.
    from cugraph_pyg.loader.dispatch import call_cugraph_algorithm

    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    sg = pG.extract_subgraph(
        create_using=cugraph.MultiGraph(directed=True),
        renumber_graph=True,
    )

    res_local = call_cugraph_algorithm(
        "uniform_neighbor_sample",
        sg,
        [89021, 89216],
        [10],
        with_replacement=True,
        backend="cudf",
    )
    # Note: the remote call receives rpG directly — the dispatcher is expected
    # to handle subgraph creation implicitly.
    res_remote = call_cugraph_algorithm(
        "uniform_neighbor_sample",
        rpG,
        [89021, 89216],
        [10],
        with_replacement=True,
        backend="cudf",
    )

    assert (res_local["sources"] == res_remote["sources"]).all()
    assert (res_local["destinations"] == res_remote["destinations"]).all()
    assert (res_local["indices"] == res_remote["indices"]).all()
@pytest.mark.skip(reason="FIXME: this may fail in CI")
def test_remote_graph_renumber_vertices(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """
    Renumber vertices by type locally and remotely, check the results agree,
    and verify every vertex id in each renumbered range has the expected type.
    """
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    re_local = pG.renumber_vertices_by_type()
    re_remote = rpG.renumber_vertices_by_type()
    assert re_local == re_remote

    # "start"/"stop" bounds are treated as inclusive, hence stop + 1 below.
    for k in range(len(re_remote)):
        start = re_remote["start"][k]
        stop = re_remote["stop"][k]
        for i in range(start, stop + 1):
            assert (
                rpG.get_vertex_data(vertex_ids=[i])[rpG.type_col_name][0]
                == re_remote.index[k]
            )
@pytest.mark.skip(reason="FIXME: this may fail in CI")
def test_remote_graph_renumber_edges(
    client_with_property_csvs_loaded, pG_with_property_csvs_loaded
):
    """
    Renumber edges by type locally and remotely, check the results agree,
    and verify every edge id in each renumbered range has the expected type.
    """
    rpG = RemoteGraph(client_with_property_csvs_loaded, 0)
    pG = pG_with_property_csvs_loaded

    re_local = pG.renumber_edges_by_type()
    re_remote = rpG.renumber_edges_by_type()
    assert re_local == re_remote

    # "start"/"stop" bounds are treated as inclusive, hence stop + 1 below.
    for k in range(len(re_remote)):
        start = re_remote["start"][k]
        stop = re_remote["stop"][k]
        for i in range(start, stop + 1):
            assert (
                rpG.get_edge_data(edge_ids=[i])[rpG.type_col_name][0]
                == re_remote.index[k]
            )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/client2_script.py | # Copyright (c) 2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to be used to simulate a cugraph_service client.
"""
import time
import random

from cugraph_service_client import CugraphServiceClient

client = CugraphServiceClient()

# Pause before issuing requests — presumably to give the server (or other
# concurrently-launched client scripts) time to start; confirm with the test
# harness that launches this script.
time.sleep(10)

# Random ID used only to tag this client's (commented-out) debug output.
n = int(random.random() * 1000)
# print(f"---> starting {n}", flush=True)

# Hammer the server with extract_subgraph calls to simulate a busy client.
for i in range(1000000):
    extracted_gid = client.extract_subgraph(allow_multi_edges=False)
    # client.delete_graph(extracted_gid)
    # print(f"---> {n}: extracted {extracted_gid}", flush=True)

# print(f"---> done {n}", flush=True)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/README.md | # `cugraph_service` Tests
## Prerequisites
* The multi-GPU tests (`test_mg_*.py` files) require a `dask` scheduler and workers to be running on the test machine, with the path to the generated scheduler JSON file set in the env var `SCHEDULER_FILE`. These also assume the test machine has at least two GPUs, which can be accessed via device IDs 0 and 1.
* When running on a multi-GPU machine with >2 GPUs, the `pytest` process can be limited to specific GPUs using the `CUDA_VISIBLE_DEVICES` env var. For example, `export CUDA_VISIBLE_DEVICES=6,7` will limit the processes run in that environment to the two GPUs identified as 6 and 7, and within the process GPU 6 will be accessed as device `0`, GPU 7 will be device `1`.
* The `dask` scheduler and workers can be run using the scripts in this repo: `<cugraph repo dir>/python/cugraph_service/scripts/run-dask-process.sh` (see `../README.md` for examples)
## End-to-end tests
* End-to-end (e2e) tests test code paths from the client to the server running in a separate process.
* e2e tests use pytest fixtures which automatically start a server subprocess in the background, and terminate it at the end of the test run(s). One challenge with this is that STDOUT and STDERR are not currently redirected to the console running pytest, making debugging errors much harder.
* In order to debug in this situation, a user can start a server manually in the background prior to running pytest. If pytest detects a running server, it will use that instance instead of starting a new one. This allows the user to have access to the STDOUT and STDERR of the server process, as well as the ability to interactively debug it using `breakpoint()` calls if necessary.
## cugraph_handler tests
* cugraph_handler tests do not require a separate server process to be running, since these are tests which import the handler - just as the server script would - and run methods directly on it. This tests the majority of the code paths on the much larger server side, without the overhead of an e2e test.
* SG cugraph_handler tests are run in CI since 1) MG tests are not supported in CI, and 2) they provide the majority of the code coverage in a way that can be debugged without requiring access to separate processes (which would be difficult in the current CI system)
## client tests
* The client class is currently tested only through end-to-end tests, but in the future could be tested in isolation using a mock object to simulate interaction with a running server.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/test_mg_e2e.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pytest
import cupy as cp
from cugraph_service_server.testing import utils
from . import data
###############################################################################
# fixtures
@pytest.fixture(scope="module")
def mg_server():
    """
    Start a cugraph_service server that uses multiple GPUs via a dask
    configuration, then stop it when done with the fixture.

    This requires that a dask scheduler be running, and the corresponding
    SCHEDULER_FILE env var is set. The scheduler can be started using the
    script in this repo:
    "<repo>/python/cugraph_service/scripts/run-dask-process.sh scheduler
    workers"
    """
    from cugraph_service_client import CugraphServiceClient
    from cugraph_service_client.exceptions import CugraphServiceError

    server_process = None
    host = "0.0.0.0"
    port = 9090
    client = CugraphServiceClient(host, port)

    # Probe for an already-running server; uptime() raises if none is there.
    try:
        client.uptime()
        print("FOUND RUNNING SERVER, ASSUMING IT SHOULD BE USED FOR TESTING!")
        yield

    except CugraphServiceError:
        # A server was not found, so start one for testing then stop it when
        # testing is done.
        dask_scheduler_file = os.environ.get("SCHEDULER_FILE")
        start_local_cuda_cluster = False
        if dask_scheduler_file is None:
            # No scheduler file set: have the server subprocess start its own
            # local CUDA cluster instead.
            start_local_cuda_cluster = True
        else:
            dask_scheduler_file = Path(dask_scheduler_file)
            if not dask_scheduler_file.exists():
                raise FileNotFoundError(
                    "env var SCHEDULER_FILE is set to "
                    f"{dask_scheduler_file}, which does not "
                    "exist."
                )
        server_process = utils.start_server_subprocess(
            host=host,
            port=port,
            start_local_cuda_cluster=start_local_cuda_cluster,
            dask_scheduler_file=dask_scheduler_file,
        )
        # yield control to the tests, cleanup on return
        yield
        print("\nTerminating server...", end="", flush=True)
        server_process.terminate()
        server_process.wait(timeout=60)
        print("done.", flush=True)
@pytest.fixture(scope="module")
def sg_server_on_device_1():
    """
    Start a cugraph_service server, stop it when done with the fixture.

    The server is restricted (via CUDA_VISIBLE_DEVICES) to GPU 1, which is
    presumably intended to leave device 0 free for client-side result
    transfers in the tests.
    """
    from cugraph_service_client import CugraphServiceClient
    from cugraph_service_client.exceptions import CugraphServiceError

    host = "localhost"
    port = 9090
    client = CugraphServiceClient(host, port)

    # Probe for an already-running server; uptime() raises if none is there.
    try:
        client.uptime()
        print("FOUND RUNNING SERVER, ASSUMING IT SHOULD BE USED FOR TESTING!")
        yield

    except CugraphServiceError:
        # A server was not found, so start one for testing then stop it when
        # testing is done.
        server_process = utils.start_server_subprocess(
            host=host,
            port=port,
            env_additions={"CUDA_VISIBLE_DEVICES": "1"},
        )
        # yield control to the tests, cleanup on return
        yield
        print("\nTerminating server...", end="", flush=True)
        server_process.terminate()
        server_process.wait(timeout=60)
        print("done.", flush=True)
@pytest.fixture(scope="module")
def client_of_mg_server(mg_server):
    """
    Creates a client instance to the running server, closes the client when the
    fixture is no longer used by tests.
    """
    from cugraph_service_client import CugraphServiceClient, defaults

    client = CugraphServiceClient(defaults.host, defaults.port)

    # yield control to the tests, cleanup on return
    yield client

    client.close()
@pytest.fixture(scope="function")
def client_of_mg_server_with_edgelist_csv_loaded(client_of_mg_server):
    """
    Loads the karate CSV into the default graph on the server.

    Yields (client, test_data); all graphs on the server are deleted on
    teardown.
    """
    test_data = data.edgelist_csv_data["karate"]
    client = client_of_mg_server
    client.load_csv_as_edge_data(
        test_data["csv_file_name"],
        dtypes=test_data["dtypes"],
        vertex_col_names=["0", "1"],
        type_name="",
    )
    # The server's default graph has ID 0 and should be the only graph.
    assert client.get_graph_ids() == [0]

    # yield control to the tests, cleanup on return
    yield (client, test_data)

    for gid in client.get_graph_ids():
        client.delete_graph(gid)
@pytest.fixture(scope="module")
def client_of_sg_server_on_device_1(sg_server_on_device_1):
    """
    Creates a client instance to a server running on device 1, closes the
    client when the fixture is no longer used by tests.
    """
    from cugraph_service_client import CugraphServiceClient, defaults

    client = CugraphServiceClient(defaults.host, defaults.port)
    # Start from a clean server state: remove any graphs left by prior tests.
    for gid in client.get_graph_ids():
        client.delete_graph(gid)

    # FIXME: should this fixture always unconditionally unload all extensions?
    # client.unload_graph_creation_extensions()

    # yield control to the tests, cleanup on return
    yield client

    client.close()
@pytest.fixture(
    scope="module",
    # Array sizes in bytes, from 10 B to 2 GB, for transfer benchmarking.
    params=[int(n) for n in [1e1, 1e3, 1e6, 1e9, 2e9]],
    ids=lambda p: f"bytes={p:.1e}",
)
def client_of_sg_server_on_device_1_with_test_array(
    request,
    client_of_sg_server_on_device_1,
):
    """
    Creates a server-side test array of the parametrized size and yields
    (client, array_id, nbytes); deletes the server-side array on teardown.
    """
    client = client_of_sg_server_on_device_1
    nbytes = request.param
    test_array_id = client._create_test_array(nbytes)

    # yield control to the tests, cleanup on return
    yield (client, test_array_id, nbytes)

    client._delete_test_array(test_array_id)
@pytest.fixture(scope="function")
def client_of_sg_server_on_device_1_large_property_graph_loaded(
    client_of_sg_server_on_device_1,
    graph_creation_extension_large_property_graph,
):
    """
    Loads the large-property-graph creation extension, creates the graph on
    the server, and yields (client, graph_id); deletes the graph and unloads
    the extension modules on teardown.
    """
    client = client_of_sg_server_on_device_1
    server_extension_dir = graph_creation_extension_large_property_graph

    ext_mod_names = client.load_graph_creation_extensions(server_extension_dir)
    # Assume fixture that starts server on device 1 has the extension loaded
    # for creating large property graphs.
    new_graph_id = client.call_graph_creation_extension(
        "graph_creation_extension_large_property_graph"
    )
    assert new_graph_id in client.get_graph_ids()

    # yield control to the tests, cleanup on return
    yield (client, new_graph_id)

    client.delete_graph(new_graph_id)
    for mod_name in ext_mod_names:
        client.unload_extension_module(mod_name)
# This fixture is parametrized for different device IDs to test against, and
# simply returns the param value to the test using it.
# None means "return results to host memory"; 0 means "return to GPU 0".
@pytest.fixture(scope="module", params=[None, 0], ids=lambda p: f"device={p}")
def result_device_id(request):
    return request.param
###############################################################################
# tests
def test_get_default_graph_info(client_of_mg_server_with_edgelist_csv_loaded):
    """
    Test to ensure various info on the default graph loaded from the specified
    fixture is correct.
    """
    (client_of_mg_server, test_data) = client_of_mg_server_with_edgelist_csv_loaded

    # get_graph_type() is a test/debug API which returns a string repr of the
    # graph type. Ideally, users should not need to know the graph type.
    assert "MG" in client_of_mg_server._get_graph_type()

    assert client_of_mg_server.get_graph_info(["num_edges"]) == test_data["num_edges"]
    # An MG server must be backed by more than one GPU.
    assert client_of_mg_server.get_server_info()["num_gpus"] > 1
def test_get_edge_IDs_for_vertices(client_of_mg_server_with_edgelist_csv_loaded):
    """
    Smoke test get_edge_IDs_for_vertices() against a subgraph extracted from
    the default MG graph.
    """
    (client_of_mg_server, test_data) = client_of_mg_server_with_edgelist_csv_loaded

    # get_graph_type() is a test/debug API which returns a string repr of the
    # graph type. Ideally, users should not need to know the graph type.
    assert "MG" in client_of_mg_server._get_graph_type()

    graph_id = client_of_mg_server.extract_subgraph(check_multi_edges=True)
    # Only checks that the call succeeds; returned IDs are not validated here.
    client_of_mg_server.get_edge_IDs_for_vertices([1, 2, 3], [0, 0, 0], graph_id)
def test_device_transfer(
    benchmark,
    result_device_id,
    client_of_sg_server_on_device_1_with_test_array,
):
    """
    Transfer a server-side test array to the client — to host memory when
    result_device_id is None (via RPC), otherwise directly to the given
    client GPU — and verify the length, contents, and destination device.
    """
    (client, test_array_id, nbytes) = client_of_sg_server_on_device_1_with_test_array

    # device to host via RPC is too slow for large transfers. Use an explicit
    # skip (rather than silently returning, which would report a misleading
    # "pass") so the report shows this combination was not exercised.
    if result_device_id is None and nbytes > 1e6:
        pytest.skip("host transfer via RPC is too slow for large arrays")

    bytes_returned = benchmark(
        client._receive_test_array,
        test_array_id,
        result_device=result_device_id,
    )

    # bytes_returned should be a cupy array of int8 values on
    # result_device_id, and each value should be 1.
    # Why not uint8 and value 255? Because when transferring data to a CPU
    # (result_device=None), Apache Thrift is used, which does not support
    # unsigned int types.
    assert len(bytes_returned) == nbytes
    if result_device_id is None:
        assert isinstance(bytes_returned, list)
        # all() short-circuits on the first mismatch, unlike the original
        # "False not in [n == 1 for n in ...]" which built a full list first.
        assert all(n == 1 for n in bytes_returned)
    else:
        assert isinstance(bytes_returned, cp.ndarray)
        assert (bytes_returned == cp.ones(nbytes, dtype="int8")).all()
        device_n = cp.cuda.Device(result_device_id)
        assert bytes_returned.device == device_n
def test_uniform_neighbor_sampling_result_on_device_error(
    client_of_sg_server_on_device_1_large_property_graph_loaded,
):
    """
    Ensure errors are handled properly when using device transfer
    """
    from cugraph_service_client.exceptions import CugraphServiceError

    (client, graph_id) = client_of_sg_server_on_device_1_large_property_graph_loaded
    extracted_graph_id = client.extract_subgraph(graph_id=graph_id)

    start_list = [0, 1, 2]
    fanout_vals = []  # should raise an exception
    with_replacement = False

    # The server-side error must surface on the client as a
    # CugraphServiceError even when a result_device is requested.
    with pytest.raises(CugraphServiceError):
        client.uniform_neighbor_sample(
            start_list=start_list,
            fanout_vals=fanout_vals,
            with_replacement=with_replacement,
            graph_id=extracted_graph_id,
            result_device=0,
        )
def test_uniform_neighbor_sampling_result_on_device(
    benchmark,
    result_device_id,
    client_of_sg_server_on_device_1_large_property_graph_loaded,
):
    """
    Ensures uniform_neighbor_sample() results are transfered from the server to
    a specific client device when specified.
    """
    (client, graph_id) = client_of_sg_server_on_device_1_large_property_graph_loaded
    extracted_graph_id = client.extract_subgraph(graph_id=graph_id)

    start_list = [0, 1, 2]
    fanout_vals = [2]
    with_replacement = False

    result = benchmark(
        client.uniform_neighbor_sample,
        start_list=start_list,
        fanout_vals=fanout_vals,
        with_replacement=with_replacement,
        graph_id=extracted_graph_id,
        result_device=result_device_id,
    )

    # The three result arrays must be parallel (same length).
    assert len(result.sources) == len(result.destinations) == len(result.indices)

    dtype = type(result.sources)
    if result_device_id is None:
        # host memory
        assert dtype is list
    else:
        # device memory
        assert dtype is cp.ndarray
        device_n = cp.cuda.Device(result_device_id)
        assert result.sources.device == device_n
def test_call_extension_result_on_device_error(
    extension1, client_of_sg_server_on_device_1
):
    """
    Ensure errors are handled properly when using device transfer
    """
    from cugraph_service_client.exceptions import CugraphServiceError

    client = client_of_sg_server_on_device_1
    extension_dir = extension1

    array1_len = 1.23  # should raise an exception
    array2_len = 10

    ext_mod_names = client.load_extensions(extension_dir)

    # The bad (float) array length must surface on the client as a
    # CugraphServiceError even when a result_device is requested.
    with pytest.raises(CugraphServiceError):
        client.call_extension(
            "my_nines_function",
            array1_len,
            "int32",
            array2_len,
            "float64",
            result_device=0,
        )

    for mod_name in ext_mod_names:
        client.unload_extension_module(mod_name)
def test_call_extension_result_on_device(
    benchmark, extension1, result_device_id, client_of_sg_server_on_device_1
):
    """
    Call an extension that returns two arrays and verify the results come
    back either as host lists (result_device_id is None) or as cupy arrays
    on the requested client device.
    """
    client = client_of_sg_server_on_device_1
    extension_dir = extension1
    array1_len = int(1e5)
    array2_len = int(1e5)

    ext_mod_names = client.load_extensions(extension_dir)
    # Unload the extension modules even if an assertion fails, so leftover
    # modules on the server cannot affect later tests.
    try:
        # my_nines_function in extension1 returns a list of two lists of 9's
        # with sizes and dtypes based on args.
        results = benchmark(
            client.call_extension,
            "my_nines_function",
            array1_len,
            "int32",
            array2_len,
            "float64",
            result_device=result_device_id,
        )

        if result_device_id is None:
            assert len(results) == 2
            assert len(results[0]) == array1_len
            assert len(results[1]) == array2_len
            # isinstance is the idiomatic type check; "type(x) == T" rejects
            # subclasses and is flagged by linters.
            assert isinstance(results[0][0], int)
            assert isinstance(results[1][0], float)
            assert results[0][0] == 9
            assert results[1][0] == 9.0
        else:
            # results is a list with one device array per array returned by
            # the extension, each resident on result_device_id.
            assert isinstance(results, list)
            assert len(results) == 2
            device_n = cp.cuda.Device(result_device_id)
            assert isinstance(results[0], cp.ndarray)
            assert results[0].device == device_n
            assert results[0].tolist() == [9] * array1_len
            assert isinstance(results[1], cp.ndarray)
            assert results[1].device == device_n
            assert results[1].tolist() == [9.0] * array2_len
    finally:
        for mod_name in ext_mod_names:
            client.unload_extension_module(mod_name)
def test_extension_adds_graph(
    extension_adds_graph, result_device_id, client_of_sg_server_on_device_1
):
    """
    Ensures an extension can create and add a graph to the server and return the
    new graph ID and other data.
    """
    extension_dir = extension_adds_graph
    client = client_of_sg_server_on_device_1

    ext_mod_names = client.load_extensions(extension_dir)

    # The extension will add a graph, compute a value based on the graph data,
    # and return the new graph ID and the result.
    graph_ids_before = client.get_graph_ids()
    val1 = 22
    val2 = 33.1
    results = client.call_extension(
        "my_extension", val1, val2, result_device=result_device_id
    )
    graph_ids_after = client.get_graph_ids()

    # Exactly one new graph must have been added; its ID is the set difference.
    assert len(graph_ids_after) - len(graph_ids_before) == 1
    new_gid = (set(graph_ids_after) - set(graph_ids_before)).pop()

    assert len(results) == 2
    assert results[0] == new_gid
    expected_edge_ids = [0, 1, 2]
    expected_val = [n + val1 + val2 for n in expected_edge_ids]
    if result_device_id is None:
        assert results[1] == expected_val
    else:
        device_n = cp.cuda.Device(result_device_id)
        assert results[0].device == device_n
        assert results[1].device == device_n
        assert results[1].tolist() == expected_val

    # FIXME: much of this test could be in a fixture which ensures the extension
    # is unloaded from the server before returning
    for mod_name in ext_mod_names:
        client.unload_extension_module(mod_name)
def test_inside_asyncio_event_loop(
    client_of_sg_server_on_device_1_large_property_graph_loaded, result_device_id
):
    """
    Ensure uniform_neighbor_sample() can be called from inside a running
    asyncio event loop (regression test for a client-side fix).
    """
    import asyncio

    client, graph_id = client_of_sg_server_on_device_1_large_property_graph_loaded

    start_list = [1, 2, 3]
    fanout_vals = [2, 2, 2]
    with_replacement = True

    async def uns():
        return client.uniform_neighbor_sample(
            start_list=start_list,
            fanout_vals=fanout_vals,
            with_replacement=with_replacement,
            graph_id=graph_id,
            result_device=result_device_id,
        )

    # ensure call succeeds; have confirmed this fails without fix in client
    assert asyncio.run(uns()) is not None
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/gen_demo_data.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random

###############################################################################
# vertex CSV
# 1000 vertices, each with a random color and a random num_stars in [0, 10000).
# Columns are space-separated; the first row is the header.
colors = ["red", "white", "blue", "green", "yellow", "orange", "black", "purple"]

with open("vertex_data.csv", "w") as vertex_out:
    print("vertex_id color num_stars", file=vertex_out)
    for i in range(1000):
        print(
            f"{i} {random.choice(colors)} {int(random.random() * 10000)}",
            file=vertex_out,
        )

###############################################################################
# edge CSV
# 10000 edges between random vertex IDs with a random relationship type and a
# num_interactions value in [10, 20).
relationship = ["friend", "coworker", "reviewer"]
ids = range(1000)

with open("edge_data.csv", "w") as edge_out:
    print("src dst relationship_type num_interactions", file=edge_out)
    for i in range(10000):
        src = random.choice(ids)
        dst = random.choice(ids)
        # Re-draw dst until it differs from src so no self-loop edges are
        # generated.
        while src == dst:
            dst = random.choice(ids)
        print(
            f"{src} {dst} "
            f"{random.choice(relationship)} "
            f"{int((random.random() + 1) * 10)}",
            file=edge_out,
        )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/conftest.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from cugraph_service_server.testing import utils
graph_creation_extension1_file_contents = """
import cudf
from cugraph.experimental import PropertyGraph
def custom_graph_creation_function(server):
edgelist = cudf.DataFrame(columns=['src', 'dst'],
data=[(0, 77), (1, 88), (2, 99)])
pG = PropertyGraph()
pG.add_edge_data(edgelist, vertex_col_names=('src', 'dst'))
# smoke test the server object by accesing the "mg" attr
server.is_multi_gpu
return pG
"""
graph_creation_extension2_file_contents = """
import cudf
from cugraph.experimental import PropertyGraph
def __my_private_function():
pass
def my_graph_creation_function(arg1:str, arg2:str, arg3:str, server):
edgelist = cudf.DataFrame(columns=[arg1, arg2, arg3],
data=[(0, 1, 2), (88, 99, 77)])
pG = PropertyGraph()
pG.add_edge_data(edgelist, vertex_col_names=(arg1, arg2))
return pG
"""
graph_creation_extension_long_running_file_contents = """
import time
import cudf
from cugraph.experimental import PropertyGraph
def long_running_graph_creation_function(server):
time.sleep(10)
pG = PropertyGraph()
return pG
"""
graph_creation_extension_no_facade_arg_file_contents = """
import time
import cudf
from cugraph.experimental import PropertyGraph
def graph_creation_function(arg1, arg2):
time.sleep(10)
pG = PropertyGraph()
return pG
"""
graph_creation_extension_bad_arg_order_file_contents = """
import time
import cudf
from cugraph.experimental import PropertyGraph
def graph_creation_function(server, arg1, arg2):
pG = PropertyGraph()
return pG
"""
graph_creation_extension_empty_graph_file_contents = """
import time
import cudf
from cugraph.experimental import PropertyGraph, MGPropertyGraph
def graph_creation_function(server):
if server.is_multi_gpu:
pG = MGPropertyGraph()
else:
pG = PropertyGraph()
return pG
"""
graph_creation_extension_big_vertex_ids_file_contents = """
import cudf
import cupy
import dask_cudf
from cugraph.experimental import PropertyGraph, MGPropertyGraph
def graph_creation_function_vert_and_edge_data_big_vertex_ids(server):
if server.is_multi_gpu:
pG = MGPropertyGraph()
else:
pG = PropertyGraph()
big_num = (2**32)+1
df = cudf.DataFrame({"vert_id":cupy.arange(big_num, big_num+10,
dtype="int64"),
"vert_prop":cupy.arange(big_num+100, big_num+110,
dtype="int64")})
if server.is_multi_gpu:
df = dask_cudf.from_cudf(df, npartitions=2)
pG.add_vertex_data(df, vertex_col_name="vert_id")
df = cudf.DataFrame({"src":cupy.arange(big_num, big_num+10, dtype="int64"),
"dst":cupy.arange(big_num+1,big_num+11, dtype="int64"),
"edge_prop":cupy.arange(big_num+100, big_num+110,
dtype="int64")})
if server.is_multi_gpu:
df = dask_cudf.from_cudf(df, npartitions=2)
pG.add_edge_data(df, vertex_col_names=["src", "dst"])
return pG
"""
graph_creation_extension_large_property_graph_file_contents = """
import cudf
import cupy
import dask_cudf
from cugraph.experimental import PropertyGraph, MGPropertyGraph
def graph_creation_extension_large_property_graph(server):
if server.is_multi_gpu:
pG = MGPropertyGraph()
else:
pG = PropertyGraph()
num_verts = 10e6
df = cudf.DataFrame({"vert_id":cupy.arange(num_verts, dtype="int32"),
"vert_prop":cupy.arange(num_verts, dtype="int32"),
})
if server.is_multi_gpu:
df = dask_cudf.from_cudf(df, npartitions=2)
pG.add_vertex_data(df, vertex_col_name="vert_id")
df = cudf.DataFrame({"src":cupy.arange(num_verts, dtype="int32"),
"dst":cupy.arange(1, num_verts+1, dtype="int32"),
"edge_prop":cupy.arange(num_verts, dtype="int32"),
})
if server.is_multi_gpu:
df = dask_cudf.from_cudf(df, npartitions=2)
pG.add_edge_data(df, vertex_col_names=["src", "dst"])
return pG
"""
extension1_file_contents = """
import cupy as cp
def my_nines_function(array1_size, array1_dtype, array2_size, array2_dtype):
'''
Returns 2 arrays of size and dtype specified containing only 9s
'''
array1 = cp.array([9] * array1_size, dtype=array1_dtype)
array2 = cp.array([9] * array2_size, dtype=array2_dtype)
return (array1, array2)
"""
extension_with_facade_file_contents = """
import cupy
def my_extension(arg1, arg2, server):
# This extension assumes the server already has a single PG loaded via
# calling graph_creation_extension1
gid = server.get_graph_ids()[0]
pG = server.get_graph(gid)
edge_df = pG.get_edge_data()
# Do an arbitrary operation on the PG based on the args, and return the
# result as a cupy array.
retval = cupy.array(edge_df[pG.edge_id_col_name] + arg1 + arg2)
return retval
"""
extension_returns_none_file_contents = """
def my_extension():
return None
"""
extension_adds_graph_file_contents = """
import cupy
import cudf
from cugraph.experimental import PropertyGraph
def my_extension(arg1, arg2, server):
'''
This extension creates a new graph, registers it with the server, and
returns the new graph ID and some additional data.
'''
df = cudf.DataFrame({"src": [0, 1, 2],
"dst": [1, 2, 3],
"edge_prop": ["a", "b", "c"],
})
pG = PropertyGraph()
pG.add_edge_data(df, vertex_col_names=["src", "dst"])
pG_gid = server.add_graph(pG)
edge_df = pG.get_edge_data()
values = cupy.array(edge_df[pG.edge_id_col_name] + arg1 + arg2)
# UCX-Py transfers require cupy types, and cupy types are converted to host
# for non-UCX-Py transfers.
pG_gid = cupy.int8(pG_gid)
return (pG_gid, values)
"""
###############################################################################
# module scope fixtures
@pytest.fixture(scope="module")
def server():
    """
    Ensure a cugraph_service server is reachable for this test module: reuse
    one that is already running, otherwise launch a subprocess and terminate
    it during teardown.
    """
    from cugraph_service_client import CugraphServiceClient
    from cugraph_service_client.exceptions import CugraphServiceError

    host = "localhost"
    port = 9090
    probe = CugraphServiceClient(host, port)
    try:
        probe.uptime()
        print("FOUND RUNNING SERVER, ASSUMING IT SHOULD BE USED FOR TESTING!")
        yield
    except CugraphServiceError:
        # No server answered the probe; start one for testing and shut it
        # down once every test in this module has run.
        proc = utils.start_server_subprocess(host=host, port=port)
        yield
        print("\nTerminating server...", end="", flush=True)
        proc.terminate()
        proc.wait(timeout=60)
        print("done.", flush=True)
@pytest.fixture(scope="module")
def graph_creation_extension1():
    """Temp dir containing the graph_creation_extension1 module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(graph_creation_extension1_file_contents)
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension2():
    """Temp dir containing the graph_creation_extension2 module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(graph_creation_extension2_file_contents)
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_long_running():
    """Temp dir containing the long-running extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_long_running_file_contents
    )
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_no_facade_arg():
    """Temp dir containing the no-facade-arg extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_no_facade_arg_file_contents
    )
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_bad_arg_order():
    """Temp dir containing the bad-arg-order extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_bad_arg_order_file_contents
    )
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_big_vertex_ids():
    """Temp dir containing the big-vertex-IDs extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_big_vertex_ids_file_contents
    )
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_empty_graph():
    """Temp dir containing the empty-graph extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_empty_graph_file_contents
    )
    yield ext_dir.name
@pytest.fixture(scope="module")
def graph_creation_extension_large_property_graph():
    """Temp dir containing the large-property-graph extension; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(
        graph_creation_extension_large_property_graph_file_contents
    )
    yield ext_dir.name
# General (ie. not graph creation) extension
@pytest.fixture(scope="module")
def extension1():
    """Temp dir containing the general-purpose extension1 module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(extension1_file_contents)
    yield ext_dir.name
@pytest.fixture(scope="module")
def extension_with_facade():
    """Temp dir containing the facade-using extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(extension_with_facade_file_contents)
    yield ext_dir.name
@pytest.fixture(scope="module")
def extension_returns_none():
    """Temp dir containing the returns-None extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(extension_returns_none_file_contents)
    yield ext_dir.name
@pytest.fixture(scope="module")
def extension_adds_graph():
    """Temp dir containing the graph-adding extension module; yields its path."""
    ext_dir = utils.create_tmp_extension_dir(extension_adds_graph_file_contents)
    yield ext_dir.name
###############################################################################
# function scope fixtures
@pytest.fixture(scope="function")
def client(server):
    """
    Fresh client connected to the running server.  All graphs left over from
    prior tests are deleted before the test runs, and the connection is
    closed on teardown.
    """
    from cugraph_service_client import CugraphServiceClient, defaults

    svc_client = CugraphServiceClient(defaults.host, defaults.port)
    # Start each test from a clean server state.
    for graph_id in svc_client.get_graph_ids():
        svc_client.delete_graph(graph_id)
    # FIXME: should this fixture always unconditionally unload all extensions?
    # svc_client.unload_graph_creation_extensions()
    yield svc_client
    svc_client.close()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/test_mg_cugraph_handler.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import pickle
import pytest
from . import data
###############################################################################
# fixtures
@pytest.fixture(scope="module")
def mg_handler():
    """
    A CugraphHandler wired to a dask client: a locally-created one when the
    SCHEDULER_FILE env var is unset, otherwise one attached to the cluster
    described by that scheduler file.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()
    scheduler_file = os.environ.get("SCHEDULER_FILE")
    if scheduler_file is None:
        handler.initialize_dask_client()
    else:
        scheduler_path = Path(scheduler_file)
        if not scheduler_path.exists():
            raise FileNotFoundError(
                "env var SCHEDULER_FILE is set to "
                f"{scheduler_path}, which does not "
                "exist."
            )
        handler.initialize_dask_client(scheduler_path)
    return handler
# Make this a function-level fixture so it cleans up the mg_handler after each
# test, allowing other tests to use mg_handler without graphs loaded.
@pytest.fixture(scope="function")
def handler_with_karate_edgelist_loaded(mg_handler):
    """
    Loads the karate CSV into the default graph in the handler.

    Yields a (handler, test_data) tuple where test_data describes the loaded
    CSV; deletes every graph from the handler on teardown.
    """
    from cugraph_service_client import defaults

    test_data = data.edgelist_csv_data["karate"]

    # Ensure the handler starts with no graphs in memory
    for gid in mg_handler.get_graph_ids():
        mg_handler.delete_graph(gid)

    mg_handler.load_csv_as_edge_data(
        test_data["csv_file_name"],
        delimiter=" ",
        dtypes=test_data["dtypes"],
        header=None,
        vertex_col_names=["0", "1"],
        type_name="",
        property_columns=[],
        names=[],
        edge_id_col_name="",
        graph_id=defaults.graph_id,
    )
    # Compare against defaults.graph_id (the ID the data was loaded into,
    # used above) rather than a hard-coded 0, so this check stays valid if
    # the default graph ID ever changes.
    assert mg_handler.get_graph_ids() == [defaults.graph_id]

    yield (mg_handler, test_data)

    for gid in mg_handler.get_graph_ids():
        mg_handler.delete_graph(gid)
###############################################################################
# tests
# FIXME: consolidate this with the SG version of this test.
def test_get_graph_data_large_vertex_ids(
    mg_handler,
    graph_creation_extension_big_vertex_ids,
):
    """
    Test that graphs with large vertex ID values (>int32) are handled.
    """
    handler = mg_handler
    extension_dir = graph_creation_extension_big_vertex_ids

    # Load the extension and ensure it can be called.
    handler.load_graph_creation_extensions(extension_dir)
    new_graph_id = handler.call_graph_creation_extension(
        "graph_creation_function_vert_and_edge_data_big_vertex_ids", "()", "{}"
    )

    # The extension's vertex IDs start at (2**32)+1, so a small ID must match
    # no vertex and return an empty (pickled) result.
    invalid_vert_id = 2
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=invalid_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(vert_data)) == 0

    # A >int32 vertex ID must be looked up correctly (one matching row).
    large_vert_id = (2**32) + 1
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=large_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(vert_data)) == 1

    # Edge IDs are small ints here, so a huge edge ID must match nothing...
    invalid_edge_id = (2**32) + 1
    edge_data = handler.get_graph_edge_data(
        id_or_ids=invalid_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(edge_data)) == 0

    # ...and a small edge ID must match exactly one edge.
    small_edge_id = 2
    edge_data = handler.get_graph_edge_data(
        id_or_ids=small_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(edge_data)) == 1
# FIXME: consolidate this with the SG version of this test.
def test_get_graph_data_empty_graph(
    mg_handler,
    graph_creation_extension_empty_graph,
):
    """
    Tests that get_graph_*_data() handles empty graphs correctly.
    """
    handler = mg_handler
    extension_dir = graph_creation_extension_empty_graph

    # Load the extension and ensure it can be called.
    handler.load_graph_creation_extensions(extension_dir)
    new_graph_id = handler.call_graph_creation_extension(
        "graph_creation_function", "()", "{}"
    )

    # Any vertex lookup on an empty graph must return an empty result, not
    # raise.
    invalid_vert_id = 2
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=invalid_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(vert_data)) == 0

    # Same for edge lookups.
    invalid_edge_id = 2
    edge_data = handler.get_graph_edge_data(
        id_or_ids=invalid_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )
    assert len(pickle.loads(edge_data)) == 0
def test_get_edge_IDs_for_vertices(handler_with_karate_edgelist_loaded):
    """
    Look up the edge IDs for (src, dst) vertex pairs on a subgraph extracted
    from the karate edgelist.
    """
    from cugraph_service_client import defaults

    (handler, test_data) = handler_with_karate_edgelist_loaded

    # Use the test/debug API to ensure the correct type was created
    assert "MG" in handler.get_graph_type(defaults.graph_id)

    extracted_graph_id = handler.extract_subgraph(
        create_using=None,
        selection=None,
        edge_weight_property=None,
        default_edge_weight=1.0,
        check_multi_edges=True,
        renumber_graph=True,
        add_edge_data=True,
        graph_id=defaults.graph_id,
    )

    # FIXME: this assumes these are always the first 3 edges in karate, which
    # may not be a safe assumption.
    eIDs = handler.get_edge_IDs_for_vertices([1, 2, 3], [0, 0, 0], extracted_graph_id)

    assert eIDs == [0, 1, 2]
def test_get_graph_info(handler_with_karate_edgelist_loaded):
    """
    get_graph_info() for specific args.
    """
    from cugraph_service_client import defaults

    (handler, test_data) = handler_with_karate_edgelist_loaded

    # A common use of get_graph_info() is to get the "shape" of the data,
    # meaning the number of vertices/edges by the number of properties per
    # edge/vertex.
    info = handler.get_graph_info(
        ["num_edges", "num_edge_properties"], defaults.graph_id
    )
    # info is a dictionary containing cugraph_service_client.types.ValueWrapper
    # objs, so unwrap via get_py_obj() for easy comparison.
    shape = (
        info["num_edges"].get_py_obj(),
        info["num_edge_properties"].get_py_obj(),
    )
    assert shape == (156, 1)  # The single edge property is the weight

    # Only edge data was loaded by the fixture, so the vertex-data counts
    # should be zero.
    info = handler.get_graph_info(
        ["num_vertices_from_vertex_data", "num_vertex_properties"], defaults.graph_id
    )
    shape = (
        info["num_vertices_from_vertex_data"].get_py_obj(),
        info["num_vertex_properties"].get_py_obj(),
    )
    assert shape == (0, 0)
def test_get_graph_info_defaults(mg_handler):
    """
    Calling get_graph_info() with an empty key list must return every info
    field, with the values expected for an empty default (MG) graph.
    """
    from cugraph_service_client import defaults

    raw_info = mg_handler.get_graph_info([], graph_id=defaults.graph_id)

    # Unwrap the ValueWrapper objects into native python values.
    unwrapped = {}
    for name, wrapped in raw_info.items():
        unwrapped[name] = wrapped.get_py_obj()

    assert unwrapped == {
        "is_multi_gpu": True,
        "num_vertices": 0,
        "num_vertices_from_vertex_data": 0,
        "num_edges": 0,
        "num_vertex_properties": 0,
        "num_edge_properties": 0,
    }
def test_uniform_neighbor_sampling(handler_with_karate_edgelist_loaded):
    """
    Smoke-test uniform_neighbor_sample() on a subgraph extracted from the
    karate edgelist (only checks that the call completes).
    """
    from cugraph_service_client import defaults

    (handler, test_data) = handler_with_karate_edgelist_loaded

    start_list = [1, 2, 3]
    fanout_vals = [2, 2, 2]
    with_replacement = True

    # FIXME: add test coverage for specifying the edge ID as the
    # edge_weight_property, then ensuring the edge ID is returned properly with
    # the uniform_neighbor_sample results.
    # See: https://github.com/rapidsai/cugraph/issues/2654
    extracted_gid = handler.extract_subgraph(
        create_using=None,
        selection=None,
        edge_weight_property=None,
        default_edge_weight=1.0,
        check_multi_edges=True,
        renumber_graph=True,
        add_edge_data=True,
        graph_id=defaults.graph_id,
    )

    # Ensure call can be made, assume results verified in other tests
    handler.uniform_neighbor_sample(
        start_list=start_list,
        fanout_vals=fanout_vals,
        with_replacement=with_replacement,
        graph_id=extracted_gid,
        result_host=None,
        result_port=None,
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/test_cugraph_handler.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import pickle
from pathlib import Path
import pytest
# FIXME: Remove this once these pass in the CI environment.
# Module-level skip: pytest aborts collection of every test in this file.
pytest.skip(
    reason="FIXME: many of these tests fail in CI and are currently run "
    "manually only in dev environments.",
    allow_module_level=True,
)
###############################################################################
# fixtures
# The fixtures used in these tests are defined in conftest.py
###############################################################################
# tests
def test_load_and_call_graph_creation_extension(graph_creation_extension2):
    """
    Ensures load_extensions reads the extensions and makes the new APIs they
    add available.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler
    from cugraph_service_client.exceptions import CugraphServiceError

    handler = CugraphHandler()

    extension_dir = graph_creation_extension2

    # DNE
    with pytest.raises(CugraphServiceError):
        handler.load_graph_creation_extensions("/path/that/does/not/exist")

    # Exists, but is a file
    with pytest.raises(CugraphServiceError):
        handler.load_graph_creation_extensions(__file__)

    # Load the extension and call the function defined in it.  The fixture
    # writes its module as "my_extension.py" inside the temp dir.
    ext_mod_names = handler.load_graph_creation_extensions(extension_dir)
    assert len(ext_mod_names) == 1
    expected_mod_name = (Path(extension_dir) / "my_extension.py").as_posix()
    assert ext_mod_names[0] == expected_mod_name

    # Private function should not be callable
    with pytest.raises(CugraphServiceError):
        handler.call_graph_creation_extension("__my_private_function", "()", "{}")

    # Function which DNE in the extension
    with pytest.raises(CugraphServiceError):
        handler.call_graph_creation_extension("bad_function_name", "()", "{}")

    # Wrong number of args
    with pytest.raises(CugraphServiceError):
        handler.call_graph_creation_extension(
            "my_graph_creation_function", "('a',)", "{}"
        )

    # This call should succeed and should result in a new PropertyGraph present
    # in the handler instance.
    new_graph_ID = handler.call_graph_creation_extension(
        "my_graph_creation_function", "('a', 'b', 'c')", "{}"
    )

    assert new_graph_ID in handler.get_graph_ids()

    # Inspect the PG and ensure it was created from my_graph_creation_function
    # (that extension names its edge columns after its args, here 'a','b','c').
    pG = handler._get_graph(new_graph_ID)
    edge_props = pG.edge_property_names
    assert "c" in edge_props
def test_load_call_unload_extensions(graph_creation_extension2, extension1):
    """
    Ensure extensions can be loaded, run, and unloaded.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler
    from cugraph_service_client.exceptions import CugraphServiceError

    handler = CugraphHandler()

    graph_creation_extension_dir = graph_creation_extension2
    extension_dir = extension1

    # Loading
    gc_ext_mod_names = handler.load_graph_creation_extensions(
        graph_creation_extension_dir
    )
    ext_mod_names = handler.load_extensions(extension_dir)

    # Running
    new_graph_ID = handler.call_graph_creation_extension(
        "my_graph_creation_function", "('a', 'b', 'c')", "{}"
    )
    assert new_graph_ID in handler.get_graph_ids()

    results = handler.call_extension(
        "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
    )
    # results is a ValueWrapper object which Thrift will understand to be a
    # Value, which it can serialize. Check the ValueWrapper object here:
    # my_nines_function returns a 33-long int32 array and a 21-long float64
    # array, each filled with 9s.
    assert len(results.list_value) == 2
    assert len(results.list_value[0].list_value) == 33
    assert len(results.list_value[1].list_value) == 21
    assert type(results.list_value[0].list_value[0].int32_value) is int
    assert type(results.list_value[1].list_value[0].double_value) is float
    assert results.list_value[0].list_value[0].int32_value == 9
    assert results.list_value[1].list_value[0].double_value == 9.0

    # Unloading an unknown module name must raise
    with pytest.raises(CugraphServiceError):
        handler.unload_extension_module("invalid_module")

    # After unloading only the graph creation extensions, graph creation
    # calls must fail...
    for mod_name in gc_ext_mod_names:
        handler.unload_extension_module(mod_name)

    with pytest.raises(CugraphServiceError):
        handler.call_graph_creation_extension(
            "my_graph_creation_function", "('a', 'b', 'c')", "{}"
        )

    # ...but the general-purpose extension must still be callable.
    handler.call_extension("my_nines_function", "(33, 'int32', 21, 'float64')", "{}")

    # Once the general extensions are unloaded too, their calls must fail.
    for mod_name in ext_mod_names:
        handler.unload_extension_module(mod_name)

    with pytest.raises(CugraphServiceError):
        handler.call_extension(
            "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
        )
def test_extension_with_facade_graph_access(
    graph_creation_extension1, extension_with_facade
):
    """
    Creates a Graph then calls an extension that accesses the graph in order to
    return data.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()
    gc_extension_dir = graph_creation_extension1
    extension_dir = extension_with_facade

    # Load the extensions - use the graph creation extension to create a known PG
    # for use by the extension being tested.
    handler.load_graph_creation_extensions(gc_extension_dir)
    handler.load_extensions(extension_dir)

    new_graph_ID = handler.call_graph_creation_extension(
        "custom_graph_creation_function", "()", "{}"
    )
    assert new_graph_ID in handler.get_graph_ids()

    val1 = 33
    val2 = 22.1
    # Call the extension under test, it will access the PG loaded above to return
    # results. This extension just adds val1 + val2 to each edge ID.
    results = handler.call_extension("my_extension", f"({val1}, {val2})", "{}")

    # results is a ValueWrapper object which Thrift will understand to be a Value, which
    # it can serialize. Check the ValueWrapper object here, it should contain the 3 edge
    # IDs starting from 0 with the values added to each.
    assert len(results.list_value) == 3
    assert results.list_value[0].double_value == 0 + val1 + val2
    assert results.list_value[1].double_value == 1 + val1 + val2
    assert results.list_value[2].double_value == 2 + val1 + val2
def test_load_call_unload_testing_extensions():
    """
    Load and run the builtin benchmark_server_extension shipped in the
    cugraph_service_server.testing package (loaded by module path rather than
    a file path).
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()

    num_loaded = handler.load_graph_creation_extensions(
        "cugraph_service_server.testing.benchmark_server_extension"
    )
    assert len(num_loaded) == 1

    # Create one graph from the builtin karate dataset and one from the RMAT
    # generator; each call should register a distinct graph.
    gid1 = handler.call_graph_creation_extension(
        "create_graph_from_builtin_dataset", "('karate',)", "{}"
    )
    scale = 2
    edgefactor = 2
    gid2 = handler.call_graph_creation_extension(
        "create_graph_from_rmat_generator",
        "()",
        f"{{'scale': {scale}, 'num_edges': {(scale**2) * edgefactor}, "
        "'seed': 42, 'mg': False}",
    )
    assert gid1 != gid2

    graph_info1 = handler.get_graph_info(keys=[], graph_id=gid1)
    # since the handler returns a dictionary of objs used by the serialization
    # code, convert each item to a native python type for easy checking.
    graph_info1 = {k: v.get_py_obj() for (k, v) in graph_info1.items()}
    assert graph_info1["num_vertices"] == 34
    assert graph_info1["num_edges"] == 78

    graph_info2 = handler.get_graph_info(keys=[], graph_id=gid2)
    graph_info2 = {k: v.get_py_obj() for (k, v) in graph_info2.items()}
    # RMAT output size varies with the seed, so only upper bounds are checked.
    assert graph_info2["num_vertices"] <= 4
    assert graph_info2["num_edges"] <= 8
def test_load_call_unload_extensions_python_module_path(extension1):
    """
    Load, run, unload an extension that was loaded using a python module
    path (as would be used by an import statement) instead of a file path.
    """
    from cugraph_service_client.exceptions import CugraphServiceError
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()

    extension_dir = extension1
    extension_dir_path = Path(extension_dir).absolute()
    package_name = extension_dir_path.name  # last name in the path only

    # Create an __init__py file and add the dir to sys.path so it can be
    # imported as a package.
    with open(extension_dir_path / "__init__.py", "w") as f:
        f.write("")
    # FIXME: this should go into a fixture which can unmodify sys.path when done
    sys.path.append(extension_dir_path.parent.as_posix())

    # Create another .py file to test multiple module loading
    with open(extension_dir_path / "foo.py", "w") as f:
        f.write("def foo_func(): return 33")

    # Load everything in the package, ext_mod_names should be a list of python
    # files containing 3 files (2 modules + __init__.py file).
    # Assume the .py file in the generated extension dir is named
    # "my_extension.py"
    ext_mod_names1 = handler.load_extensions(package_name)
    assert len(ext_mod_names1) == 3
    assert str(extension_dir_path / "my_extension.py") in ext_mod_names1
    assert str(extension_dir_path / "foo.py") in ext_mod_names1
    assert str(extension_dir_path / "__init__.py") in ext_mod_names1

    # Functions from both loaded modules must be callable.
    results = handler.call_extension(
        "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
    )
    assert results.list_value[0].list_value[0].int32_value == 9
    assert results.list_value[1].list_value[0].double_value == 9.0

    result = handler.call_extension("foo_func", "()", "{}")
    assert result.int32_value == 33

    # unload; afterwards both functions must raise
    for mod_name in ext_mod_names1:
        handler.unload_extension_module(mod_name)

    with pytest.raises(CugraphServiceError):
        handler.call_extension(
            "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
        )
    with pytest.raises(CugraphServiceError):
        handler.call_extension("foo_func", "()", "{}")

    # Load just an individual module in the package, ext_mod_names should only
    # contain 1 file.
    mod_name = f"{package_name}.my_extension"
    ext_mod_names2 = handler.load_extensions(mod_name)
    assert ext_mod_names2 == [str(extension_dir_path / "my_extension.py")]

    results = handler.call_extension(
        "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
    )
    assert results.list_value[0].list_value[0].int32_value == 9
    assert results.list_value[1].list_value[0].double_value == 9.0

    for mod_name in ext_mod_names2:
        handler.unload_extension_module(mod_name)

    with pytest.raises(CugraphServiceError):
        handler.call_extension(
            "my_nines_function", "(33, 'int32', 21, 'float64')", "{}"
        )
def test_load_call_unload_graph_creation_extension_no_args(graph_creation_extension1):
    """
    The extension provided by graph_creation_extension1 takes no user args;
    it must be loadable and callable with empty args/kwargs strings.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()
    handler.load_graph_creation_extensions(graph_creation_extension1)

    created_gid = handler.call_graph_creation_extension(
        "custom_graph_creation_function", "()", "{}"
    )
    assert created_gid in handler.get_graph_ids()
def test_load_call_unload_graph_creation_extension_no_facade_arg(
    graph_creation_extension_no_facade_arg,
):
    """
    An extension without the server facade arg must still be callable with
    positional and keyword user args.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()
    handler.load_graph_creation_extensions(graph_creation_extension_no_facade_arg)

    created_gid = handler.call_graph_creation_extension(
        "graph_creation_function", "('a')", "{'arg2':33}"
    )
    assert created_gid in handler.get_graph_ids()
def test_load_call_unload_graph_creation_extension_bad_arg_order(
    graph_creation_extension_bad_arg_order,
):
    """
    An extension with the facade arg in the wrong (first) position must be
    rejected when called.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler
    from cugraph_service_client.exceptions import CugraphServiceError

    handler = CugraphHandler()
    handler.load_graph_creation_extensions(graph_creation_extension_bad_arg_order)

    with pytest.raises(CugraphServiceError):
        handler.call_graph_creation_extension(
            "graph_creation_function", "('a', 'b')", "{}"
        )
def test_get_graph_data_large_vertex_ids(graph_creation_extension_big_vertex_ids):
    """
    Test that graphs with large vertex ID values (>int32) are handled.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()

    extension_dir = graph_creation_extension_big_vertex_ids

    # Load the extension and ensure it can be called.
    handler.load_graph_creation_extensions(extension_dir)
    new_graph_id = handler.call_graph_creation_extension(
        "graph_creation_function_vert_and_edge_data_big_vertex_ids", "()", "{}"
    )

    # The extension's vertex IDs start at (2**32)+1, so a small ID must match
    # no vertex and return an empty (pickled) result.
    invalid_vert_id = 2
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=invalid_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(vert_data)) == 0

    # A >int32 vertex ID must be looked up correctly (one matching row).
    large_vert_id = (2**32) + 1
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=large_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(vert_data)) == 1

    # Edge IDs are small ints here, so a huge edge ID must match nothing...
    invalid_edge_id = (2**32) + 1
    edge_data = handler.get_graph_edge_data(
        id_or_ids=invalid_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(edge_data)) == 0

    # ...and a small edge ID must match exactly one edge.
    small_edge_id = 2
    edge_data = handler.get_graph_edge_data(
        id_or_ids=small_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(edge_data)) == 1
def test_get_graph_data_empty_graph(graph_creation_extension_empty_graph):
    """
    Tests that get_graph_*_data() handles empty graphs correctly.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()

    extension_dir = graph_creation_extension_empty_graph

    # Load the extension and ensure it can be called.
    handler.load_graph_creation_extensions(extension_dir)
    new_graph_id = handler.call_graph_creation_extension(
        "graph_creation_function", "()", "{}"
    )

    # Any vertex lookup on an empty graph must return an empty result, not
    # raise.
    invalid_vert_id = 2
    vert_data = handler.get_graph_vertex_data(
        id_or_ids=invalid_vert_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(vert_data)) == 0

    # Same for edge lookups.
    invalid_edge_id = 2
    edge_data = handler.get_graph_edge_data(
        id_or_ids=invalid_edge_id,
        null_replacement_value=0,
        property_keys=None,
        types=None,
        graph_id=new_graph_id,
    )

    assert len(pickle.loads(edge_data)) == 0
def test_get_server_info(graph_creation_extension1, extension1):
    """
    Ensures the server meta-data from get_server_info() is correct. This
    includes information about loaded extensions, so fixtures that provide
    extensions to be loaded are used.
    """
    from cugraph_service_server.cugraph_handler import CugraphHandler

    handler = CugraphHandler()
    handler.load_graph_creation_extensions(graph_creation_extension1)
    handler.load_extensions(extension1)

    meta_data = handler.get_server_info()

    assert meta_data["num_gpus"].int32_value is not None
    # The reported extension entries are module file paths; their parent dir
    # must be the dir each fixture created.
    assert (
        str(
            Path(
                meta_data["graph_creation_extensions"].list_value[0].get_py_obj()
            ).parent
        )
        == graph_creation_extension1
    )
    assert (
        str(Path(meta_data["extensions"].list_value[0].get_py_obj()).parent)
        == extension1
    )
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/demo1.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from cugraph_service_client import CugraphServiceClient
# Use the location of this file for finding various data files
this_dir = Path(__file__).parent
# Use the defaults for host and port (localhost, 9090)
# Assume the server is running and using the same defaults!
client = CugraphServiceClient()
# Remove any graphs from a previous session!
for gid in client.get_graph_ids():
    client.delete_graph(gid)
# Add vertex and edge data to the default graph instance (the default graph
# does not require a graph ID to access) The file names specified must be
# visible to the server.
client.load_csv_as_vertex_data(
    (this_dir / "vertex_data.csv").absolute().as_posix(),
    dtypes=["int32", "string", "int32"],
    vertex_col_name="vertex_id",
    header="infer",
)
client.load_csv_as_edge_data(
    (this_dir / "edge_data.csv").absolute().as_posix(),
    dtypes=["int32", "int32", "string", "int32"],
    vertex_col_names=("src", "dst"),
    header="infer",
)
# Verify the number of edges
assert client.get_num_edges() == 10000
# Run sampling and get a path, need to extract a subgraph first
extracted_gid = client.extract_subgraph(allow_multi_edges=True)
start_vertices = 11
max_depth = 2
(vertex_paths, edge_weights, path_sizes) = client.node2vec(
    start_vertices, max_depth, extracted_gid
)
# Create another graph on the server
graph2 = client.create_graph()
# Verify the graphs present on the server: the default graph, the subgraph
# extracted above, and the newly created graph2 (3 total)
assert len(client.get_graph_ids()) == 3
# Add edge data to the new graph
client.load_csv_as_vertex_data(
    (this_dir / "vertex_data.csv").absolute().as_posix(),
    dtypes=["int32", "string", "int32"],
    vertex_col_name="vertex_id",
    header="infer",
    graph_id=graph2,
)
# Remove the new graph from the server and verify
client.delete_graph(graph2)
assert len(client.get_graph_ids()) == 2
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/multi_client_test_runner.sh | # Copyright (c) 2019-2022, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# source this script (ie. do not run it) for easier job control from the shell
# FIXME: change this and/or cugraph_service so PYTHONPATH is not needed
# Start client1 in the background, then give it a one-second head start
# before launching the concurrent client2 instances.
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client1_script.py &
sleep 1
# Seven concurrent client2 runs; all but the last are backgrounded so the
# shell blocks until the final (foreground) run finishes.
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py &
PYTHONPATH=/Projects/cugraph/python/cugraph_service python client2_script.py
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/test_e2e.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
from pathlib import Path
import pytest
from . import data
###############################################################################
# fixtures
# The fixtures used in these tests are defined here and in conftest.py
@pytest.fixture(scope="function")
def client_with_graph_creation_extension_loaded(client, graph_creation_extension1):
    """
    Loads the extension defined in graph_creation_extension1, unloads upon completion.
    Yields the client with the extension loaded.
    """
    server_extension_dir = graph_creation_extension1
    # load_graph_creation_extensions() returns the names of the modules it
    # loaded, needed below for cleanup
    extension_modnames = client.load_graph_creation_extensions(server_extension_dir)
    # yield control to the tests, cleanup on return
    yield client
    for modname in extension_modnames:
        client.unload_extension_module(modname)
@pytest.fixture(scope="function")
def client_with_edgelist_csv_loaded(client):
    """
    Loads the karate CSV into the default graph on the server.

    Returns a (client, test_data) tuple, where test_data is the karate CSV
    meta-data dict (file name, dtypes, expected num_edges, ...).
    """
    test_data = data.edgelist_csv_data["karate"]
    client.load_csv_as_edge_data(
        test_data["csv_file_name"],
        dtypes=test_data["dtypes"],
        vertex_col_names=["0", "1"],
        type_name="",
    )
    # The data goes into the default graph (ID 0)
    assert client.get_graph_ids() == [0]
    return (client, test_data)
@pytest.fixture(scope="function")
def client_with_property_csvs_loaded(client):
    """
    Loads each of the vertex and edge property CSVs into the default graph on
    the server.

    Returns a (client, property_csv_data) tuple, where property_csv_data is
    the dict of per-CSV meta-data (file name, dtypes, vertex column names).
    """
    # Per-CSV meta-data for the two vertex types and three edge types
    merchants = data.property_csv_data["merchants"]
    users = data.property_csv_data["users"]
    transactions = data.property_csv_data["transactions"]
    relationships = data.property_csv_data["relationships"]
    referrals = data.property_csv_data["referrals"]
    # Vertex data: "merchants" and "users" types
    client.load_csv_as_vertex_data(
        merchants["csv_file_name"],
        dtypes=merchants["dtypes"],
        vertex_col_name=merchants["vert_col_name"],
        header=0,
        type_name="merchants",
    )
    client.load_csv_as_vertex_data(
        users["csv_file_name"],
        dtypes=users["dtypes"],
        vertex_col_name=users["vert_col_name"],
        header=0,
        type_name="users",
    )
    # Edge data: "transactions", "relationships", and "referrals" types
    client.load_csv_as_edge_data(
        transactions["csv_file_name"],
        dtypes=transactions["dtypes"],
        vertex_col_names=transactions["vert_col_names"],
        header=0,
        type_name="transactions",
    )
    client.load_csv_as_edge_data(
        relationships["csv_file_name"],
        dtypes=relationships["dtypes"],
        vertex_col_names=relationships["vert_col_names"],
        header=0,
        type_name="relationships",
    )
    client.load_csv_as_edge_data(
        referrals["csv_file_name"],
        dtypes=referrals["dtypes"],
        vertex_col_names=referrals["vert_col_names"],
        header=0,
        type_name="referrals",
    )
    # Everything loads into the default graph (ID 0)
    assert client.get_graph_ids() == [0]
    return (client, data.property_csv_data)
###############################################################################
# tests
def test_get_graph_info_key_types(client_with_property_csvs_loaded):
    """
    Tests error handling for info keys passed in.
    """
    from cugraph_service_client.exceptions import CugraphServiceError
    (client, test_data) = client_with_property_csvs_loaded
    # Non-string keys (alone or inside a list) are a TypeError
    for bad_key_type in (21, [21, "num_edges"]):
        with pytest.raises(TypeError):
            client.get_graph_info(bad_key_type)
    # String keys that are not recognized info names are a CugraphServiceError
    for bad_key_value in ("21", ["21"], ["num_edges", "21"]):
        with pytest.raises(CugraphServiceError):
            client.get_graph_info(bad_key_value)
    client.get_graph_info()  # valid
def test_get_num_edges_default_graph(client_with_edgelist_csv_loaded):
    """The default graph reports the edge count of the loaded CSV."""
    client, test_data = client_with_edgelist_csv_loaded
    expected_num_edges = test_data["num_edges"]
    assert client.get_graph_info("num_edges") == expected_num_edges
def test_load_csv_as_edge_data_nondefault_graph(client):
    """
    Loading edge data into a graph ID that does not exist on the server must
    raise CugraphServiceError.
    """
    from cugraph_service_client.exceptions import CugraphServiceError
    test_data = data.edgelist_csv_data["karate"]
    with pytest.raises(CugraphServiceError):
        client.load_csv_as_edge_data(
            test_data["csv_file_name"],
            dtypes=test_data["dtypes"],
            vertex_col_names=["0", "1"],
            type_name="",
            graph_id=9999,  # no graph with this ID exists on the server
        )
def test_get_num_edges_nondefault_graph(client_with_edgelist_csv_loaded):
    """
    Edge counts are tracked per-graph: loading the same CSV into a newly
    created graph gives it the same count as the default graph, and querying
    a non-existent graph ID raises.
    """
    from cugraph_service_client.exceptions import CugraphServiceError
    (client, test_data) = client_with_edgelist_csv_loaded
    # Bad graph ID
    with pytest.raises(CugraphServiceError):
        client.get_graph_info("num_edges", graph_id=9999)
    new_graph_id = client.create_graph()
    client.load_csv_as_edge_data(
        test_data["csv_file_name"],
        dtypes=test_data["dtypes"],
        vertex_col_names=["0", "1"],
        type_name="",
        graph_id=new_graph_id,
    )
    # Both the default graph (loaded by the fixture) and the new graph report
    # the CSV's edge count
    assert client.get_graph_info("num_edges") == test_data["num_edges"]
    assert (
        client.get_graph_info("num_edges", graph_id=new_graph_id)
        == test_data["num_edges"]
    )
def test_node2vec(client_with_edgelist_csv_loaded):
    """
    node2vec on an extracted subgraph returns three non-empty lists:
    vertex paths, edge weights, and path sizes.
    """
    client, _ = client_with_edgelist_csv_loaded
    subgraph_id = client.extract_subgraph()
    vertex_paths, edge_weights, path_sizes = client.node2vec(11, 2, subgraph_id)
    # FIXME: consider a more thorough test
    for result_list in (vertex_paths, edge_weights, path_sizes):
        assert isinstance(result_list, list)
        assert len(result_list) > 0
def test_extract_subgraph(client_with_edgelist_csv_loaded):
    """
    extract_subgraph() returns the ID of a new graph registered on the server.
    """
    (client, test_data) = client_with_edgelist_csv_loaded
    Gid = client.extract_subgraph(
        create_using=None,
        selection=None,
        edge_weight_property="2",  # the weight column of the edgelist CSV
        default_edge_weight=None,
        check_multi_edges=False,
    )
    # FIXME: consider a more thorough test
    assert Gid in client.get_graph_ids()
def test_call_graph_creation_extension(client_with_graph_creation_extension_loaded):
    """
    Ensure the graph creation extension preloaded by the server fixture is
    callable.
    """
    client = client_with_graph_creation_extension_loaded
    # The extension creates a new graph and returns its ID
    new_graph_id = client.call_graph_creation_extension(
        "custom_graph_creation_function"
    )
    assert new_graph_id in client.get_graph_ids()
    # Inspect the PG and ensure it was created from
    # custom_graph_creation_function
    # FIXME: add client APIs to allow for a more thorough test of the graph
    assert client.get_graph_info(["num_edges"], new_graph_id) == 3
def test_load_and_call_graph_creation_extension(
    client_with_graph_creation_extension_loaded, graph_creation_extension2
):
    """
    Tests calling a user-defined server-side graph creation extension from the
    cugraph_service client.
    """
    # The graph_creation_extension returns the tmp dir created which contains
    # the extension
    extension_dir = graph_creation_extension2
    client = client_with_graph_creation_extension_loaded
    ext_mod_names = client.load_graph_creation_extensions(extension_dir)
    assert len(ext_mod_names) == 1
    # The returned module name is the posix path of the extension file
    expected_mod_name = (Path(extension_dir) / "my_extension.py").as_posix()
    assert ext_mod_names[0] == expected_mod_name
    new_graph_id = client.call_graph_creation_extension(
        "my_graph_creation_function", "a", "b", "c"
    )
    assert new_graph_id in client.get_graph_ids()
    # Inspect the PG and ensure it was created from my_graph_creation_function
    # FIXME: add client APIs to allow for a more thorough test of the graph
    assert client.get_graph_info(["num_edges"], new_graph_id) == 2
    # Ensure the other graph creation extension (loaded as part of
    # client_with_graph_creation_extension_loaded) can still be called
    new_graph_id = client.call_graph_creation_extension(
        "custom_graph_creation_function"
    )
    assert new_graph_id in client.get_graph_ids()
def test_load_and_call_graph_creation_long_running_extension(
    client_with_graph_creation_extension_loaded, graph_creation_extension_long_running
):
    """
    Tests calling a user-defined server-side graph creation extension from the
    cugraph_service client. This uses a client of a server that already has an
    extension loaded to ensure both can properly coexist.
    """
    # The graph_creation_extension returns the tmp dir created which contains
    # the extension
    extension_dir = graph_creation_extension_long_running
    client = client_with_graph_creation_extension_loaded
    ext_mod_names = client.load_graph_creation_extensions(extension_dir)
    assert len(ext_mod_names) == 1
    # The returned module name is the posix path of the extension file
    expected_mod_name = (Path(extension_dir) / "my_extension.py").as_posix()
    assert ext_mod_names[0] == expected_mod_name
    # The call blocks until the long-running extension finishes
    new_graph_id = client.call_graph_creation_extension(
        "long_running_graph_creation_function"
    )
    assert new_graph_id in client.get_graph_ids()
    # Inspect the PG and ensure it was created from my_graph_creation_function
    # FIXME: add client APIs to allow for a more thorough test of the graph
    assert client.get_graph_info(["num_edges"], new_graph_id) == 0
def test_load_call_unload_extension(client, extension1):
    """
    Ensure extensions can be loaded, run, and unloaded; calling an extension
    after its module is unloaded must raise CugraphServiceError.
    """
    from cugraph_service_client.exceptions import CugraphServiceError
    extension_dir = extension1
    # Loading
    ext_mod_names = client.load_extensions(extension_dir)
    # Running
    # my_nines_function in extension1 returns a list of two lists of 9's with
    # sizes and dtypes based on args.
    results = client.call_extension("my_nines_function", 33, "int32", 21, "float64")
    assert len(results) == 2
    assert len(results[0]) == 33
    assert len(results[1]) == 21
    # isinstance() is the idiomatic type check (direct `type(x) == T`
    # comparison is flagged by linters as E721)
    assert isinstance(results[0][0], int)
    assert isinstance(results[1][0], float)
    assert results[0][0] == 9
    assert results[1][0] == 9.0
    # Unloading
    for mod_name in ext_mod_names:
        client.unload_extension_module(mod_name)
    with pytest.raises(CugraphServiceError):
        client.call_extension("my_nines_function", 33, "int32", 21, "float64")
def test_extension_returns_none(client, extension_returns_none):
    """
    Ensures an extension that returns None is handled
    """
    # FIXME: much of this test could be in a fixture which ensures the
    # extension is unloaded from the server before returning
    loaded_module_names = client.load_extensions(extension_returns_none)
    assert client.call_extension("my_extension") is None
    for loaded_name in loaded_module_names:
        client.unload_extension_module(loaded_name)
def test_get_graph_vertex_data(client_with_property_csvs_loaded):
    """
    Vertex data can be fetched for all vertices, a list of IDs, or a single
    ID; each returned row's first element is the vertex ID.
    """
    (client, test_data) = client_with_property_csvs_loaded
    # FIXME: do not hardcode the shape values, get them from the input data.
    np_array_all_vertex_data = client.get_graph_vertex_data()
    assert np_array_all_vertex_data.shape == (9, 9)
    # The remaining tests get individual vertex data - compare those to the
    # all_vertex_data retrieved earlier.
    vert_ids = [11, 86, 89021]
    np_array = client.get_graph_vertex_data(vert_ids)
    assert np_array.shape == (3, 9)
    # The 1st element is the vert ID
    for (i, vid) in enumerate(vert_ids):
        assert np_array[i][0] == vid
    # A scalar ID returns a single-row array
    np_array = client.get_graph_vertex_data(11)
    assert np_array.shape == (1, 9)
    assert np_array[0][0] == 11
    np_array = client.get_graph_vertex_data(86)
    assert np_array.shape == (1, 9)
    assert np_array[0][0] == 86
def test_get_graph_edge_data(client_with_property_csvs_loaded):
    """
    Edge data can be fetched for all edges, a list of IDs, or a single ID;
    each returned row's first element is the edge ID.
    """
    (client, test_data) = client_with_property_csvs_loaded
    # FIXME: do not hardcode the shape values, get them from the input data.
    np_array_all_rows = client.get_graph_edge_data()
    assert np_array_all_rows.shape == (17, 11)
    # The remaining tests get individual edge data - compare those to the
    # all_edge_data retrieved earlier.
    edge_ids = [0, 1, 2]
    np_array = client.get_graph_edge_data(edge_ids)
    assert np_array.shape == (3, 11)
    # The 0th element is the edge ID
    for (i, eid) in enumerate(edge_ids):
        assert np_array[i][0] == eid
    # A scalar ID returns a single-row array
    np_array = client.get_graph_edge_data(0)
    assert np_array.shape == (1, 11)
    assert np_array[0][0] == 0
    np_array = client.get_graph_edge_data(1)
    assert np_array.shape == (1, 11)
    assert np_array[0][0] == 1
def test_get_graph_info(client_with_property_csvs_loaded):
    """Vertex/edge counts and property counts match the loaded CSV data."""
    client, _ = client_with_property_csvs_loaded
    # FIXME: do not hardcode values, get them from the input data.
    vert_info = client.get_graph_info(["num_vertices", "num_vertex_properties"])
    assert (vert_info["num_vertices"], vert_info["num_vertex_properties"]) == (9, 7)
    edge_info = client.get_graph_info(["num_edges", "num_edge_properties"])
    assert (edge_info["num_edges"], edge_info["num_edge_properties"]) == (17, 7)
def test_batched_ego_graphs(client_with_edgelist_csv_loaded):
    """
    batched_ego_graphs() returns parallel src/dst/weight lists plus an
    offsets list whose final entry covers the entire result.
    """
    (client, test_data) = client_with_edgelist_csv_loaded
    extracted_gid = client.extract_subgraph()
    # These are known vertex IDs in the default graph loaded
    seeds = [0, 1, 2]
    results_lists = client.batched_ego_graphs(seeds, radius=1, graph_id=extracted_gid)
    (srcs, dsts, weights, seeds_offsets) = results_lists
    assert isinstance(srcs, Sequence)
    assert isinstance(dsts, Sequence)
    assert isinstance(weights, Sequence)
    # src/dst/weight are parallel arrays
    assert len(srcs) == len(dsts) == len(weights)
    assert isinstance(seeds_offsets, Sequence)
    # The last offset must equal the total result length
    assert len(srcs) == seeds_offsets[-1]
def test_get_edge_IDs_for_vertices(client_with_edgelist_csv_loaded):
    """Each (src, dst) pair queried maps to one returned edge ID."""
    client, _ = client_with_edgelist_csv_loaded
    subgraph_id = client.extract_subgraph()
    # Known edges in the loaded karate dataset: (1,0), (2,0), (3,0)
    src_verts = [1, 2, 3]
    dst_verts = [0, 0, 0]
    edge_ids = client.get_edge_IDs_for_vertices(
        src_verts, dst_verts, graph_id=subgraph_id
    )
    assert len(edge_ids) == len(src_verts)
def test_uniform_neighbor_sampling(client_with_edgelist_csv_loaded):
    """
    Smoke test: uniform_neighbor_sample() runs on both the default property
    graph and an explicitly extracted, renumbered subgraph.
    """
    from cugraph_service_client import defaults
    (client, test_data) = client_with_edgelist_csv_loaded
    start_list = [1, 2, 3]
    fanout_vals = [2, 2, 2]
    with_replacement = True
    # default graph is a PG, ensure it extracts a subgraph automatically
    client.uniform_neighbor_sample(
        start_list=start_list,
        fanout_vals=fanout_vals,
        with_replacement=with_replacement,
        graph_id=defaults.graph_id,
    )
    extracted_gid = client.extract_subgraph(renumber_graph=True)
    # Ensure call can be made, assume results verified in other tests
    client.uniform_neighbor_sample(
        start_list=start_list,
        fanout_vals=fanout_vals,
        with_replacement=with_replacement,
        graph_id=extracted_gid,
    )
def test_renumber_vertices_by_type(client_with_property_csvs_loaded):
    """
    Vertices are renumbered contiguously by type: the result holds the
    inclusive [start, stop] ID range per type, and the pre-renumbering IDs
    are preserved in the new "old_vid" property column.
    """
    client, _ = client_with_property_csvs_loaded
    re = client.renumber_vertices_by_type(prev_id_column="old_vid")
    # Two vertex types loaded by the fixture: 5 merchants then 4 users
    assert re.start == [0, 5]
    assert re.stop == [4, 8]
    # The last column of the vertex data holds the original vertex IDs
    # (debug print removed)
    assert client.get_graph_vertex_data(property_keys=["old_vid"])[:, -1].tolist() == [
        11,
        4,
        21,
        16,
        86,
        89021,
        32431,
        89216,
        78634,
    ]
def test_renumber_edges_by_type(client_with_property_csvs_loaded):
    """
    Edges are renumbered contiguously by type: the result holds the inclusive
    [start, stop] ID range per type, and the pre-renumbering IDs are
    preserved in the new "old_eid" property column.
    """
    client, _ = client_with_property_csvs_loaded
    re = client.renumber_edges_by_type(prev_id_column="old_eid")
    # Three edge types loaded by the fixture: 4 transactions,
    # 5 relationships, 8 referrals
    assert re.start == [0, 4, 9]
    assert re.stop == [3, 8, 16]
    # Edges were originally numbered 0..16; the last column of the edge data
    # holds those original IDs (debug print removed)
    assert client.get_graph_edge_data(property_keys=["old_eid"])[
        :, -1
    ].tolist() == list(range(17))
def test_create_property_graph(client):
    """
    client.graph() registers a new graph on the server and returns a proxy
    object; deleting the proxy removes the server-side graph (verified by the
    final assert).
    """
    old_ids = set(client.get_graph_ids())
    pG = client.graph()
    # The proxy's graph ID must be newly allocated on the server
    assert pG._RemoteGraph__graph_id not in old_ids
    new_ids = set(client.get_graph_ids())
    assert pG._RemoteGraph__graph_id in new_ids
    assert len(old_ids) + 1 == len(new_ids)
    # Dropping the proxy reference removes its graph from the server
    del pG
    assert set(client.get_graph_ids()) == old_ids
def test_get_server_info(client_with_graph_creation_extension_loaded):
    """
    Ensures the server meta-data from get_server_info() is correct. This
    includes information about loaded extensions, so the fixture which
    pre-loads extensions into the server is used.
    """
    client = client_with_graph_creation_extension_loaded
    meta_data = client.get_server_info()
    assert isinstance(meta_data["num_gpus"], int)
    # The reported extension paths must exist on this (shared) filesystem
    assert Path(meta_data["graph_creation_extensions"][0]).exists()
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/tests/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/transactions.csv | "user_id" "merchant_id" "volume" "time" "card_num" "card_type"
89021 11 33.2 1639084966.5513437 123456 "MC"
89216 4 0 1639085163.481217 8832 "CASH"
78634 16 72.0 1639084912.567394 4321 "DEBIT"
32431 4 103.2 1639084721.354346 98124 "V"
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/referrals.csv | "user_id_1" "user_id_2" "merchant_id" "stars"
89216 78634 11 5
89021 89216 4 4
89021 89216 21 3
89021 89216 11 3
89021 78634 21 4
78634 32431 11 4
78634 89216 21 3
78634 89216 21 4
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/users.csv | "user_id" "user_location" "vertical"
89021 78757 0
32431 78750 1
89216 78757 1
78634 47906 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/merchants.csv | "merchant_id" "merchant_location" "merchant_size" "merchant_sales" "merchant_num_employees" "merchant_name"
11 78750 44 123.2 12 "north"
4 78757 112 234.99 18 "south"
21 44145 83 992.1 27 "east"
16 47906 92 32.43 5 "west"
86 47906 192 2.43 51 "west"
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/relationships.csv | "user_id_1" "user_id_2" "relationship_type"
89216 89021 9
89216 32431 9
32431 78634 8
78634 89216 8
78634 89216 9
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/tests | rapidsai_public_repos/cugraph/python/cugraph-service/tests/data/karate.csv | 1 0 1.0
2 0 1.0
3 0 1.0
4 0 1.0
5 0 1.0
6 0 1.0
7 0 1.0
8 0 1.0
10 0 1.0
11 0 1.0
12 0 1.0
13 0 1.0
17 0 1.0
19 0 1.0
21 0 1.0
31 0 1.0
2 1 1.0
3 1 1.0
7 1 1.0
13 1 1.0
17 1 1.0
19 1 1.0
21 1 1.0
30 1 1.0
3 2 1.0
7 2 1.0
8 2 1.0
9 2 1.0
13 2 1.0
27 2 1.0
28 2 1.0
32 2 1.0
7 3 1.0
12 3 1.0
13 3 1.0
6 4 1.0
10 4 1.0
6 5 1.0
10 5 1.0
16 5 1.0
16 6 1.0
30 8 1.0
32 8 1.0
33 8 1.0
33 9 1.0
33 13 1.0
32 14 1.0
33 14 1.0
32 15 1.0
33 15 1.0
32 18 1.0
33 18 1.0
33 19 1.0
32 20 1.0
33 20 1.0
32 22 1.0
33 22 1.0
25 23 1.0
27 23 1.0
29 23 1.0
32 23 1.0
33 23 1.0
25 24 1.0
27 24 1.0
31 24 1.0
31 25 1.0
29 26 1.0
33 26 1.0
33 27 1.0
31 28 1.0
33 28 1.0
32 29 1.0
33 29 1.0
32 30 1.0
33 30 1.0
32 31 1.0
33 31 1.0
33 32 1.0
0 1 1.0
0 2 1.0
0 3 1.0
0 4 1.0
0 5 1.0
0 6 1.0
0 7 1.0
0 8 1.0
0 10 1.0
0 11 1.0
0 12 1.0
0 13 1.0
0 17 1.0
0 19 1.0
0 21 1.0
0 31 1.0
1 2 1.0
1 3 1.0
1 7 1.0
1 13 1.0
1 17 1.0
1 19 1.0
1 21 1.0
1 30 1.0
2 3 1.0
2 7 1.0
2 8 1.0
2 9 1.0
2 13 1.0
2 27 1.0
2 28 1.0
2 32 1.0
3 7 1.0
3 12 1.0
3 13 1.0
4 6 1.0
4 10 1.0
5 6 1.0
5 10 1.0
5 16 1.0
6 16 1.0
8 30 1.0
8 32 1.0
8 33 1.0
9 33 1.0
13 33 1.0
14 32 1.0
14 33 1.0
15 32 1.0
15 33 1.0
18 32 1.0
18 33 1.0
19 33 1.0
20 32 1.0
20 33 1.0
22 32 1.0
22 33 1.0
23 25 1.0
23 27 1.0
23 29 1.0
23 32 1.0
23 33 1.0
24 25 1.0
24 27 1.0
24 31 1.0
25 31 1.0
26 29 1.0
26 33 1.0
27 33 1.0
28 31 1.0
28 33 1.0
29 32 1.0
29 33 1.0
30 32 1.0
30 33 1.0
31 32 1.0
31 33 1.0
32 33 1.0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/client/pyproject.toml | # Copyright (c) 2022, NVIDIA CORPORATION.
[build-system]
requires = [
"setuptools>=61.0.0",
"wheel",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`.
build-backend = "setuptools.build_meta"
[project]
name = "cugraph-service-client"
dynamic = ["version"]
description = "cuGraph Service client"
readme = { file = "README.md", content-type = "text/markdown" }
authors = [
{ name = "NVIDIA Corporation" },
]
license = { text = "Apache 2.0" }
requires-python = ">=3.9"
dependencies = [
"thriftpy2",
] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`.
classifiers = [
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
]
[project.urls]
Homepage = "https://github.com/rapidsai/cugraph"
Documentation = "https://docs.rapids.ai/api/cugraph/stable/"
[tool.setuptools]
license-files = ["LICENSE"]
[tool.setuptools.dynamic]
version = {file = "cugraph_service_client/VERSION"}
[tool.setuptools.packages.find]
include = [
"cugraph_service_client",
]
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/client/README.md | # cugraph_service
## Description
[RAPIDS](https://rapids.ai) cugraph-service provides an RPC interface to a remote [RAPIDS cuGraph](https://github.com/rapidsai/cugraph) session, allowing users to perform GPU accelerated graph analytics from a remote process. cugraph-service uses cuGraph, cuDF, and other libraries on the server to execute graph data prep and analysis on server-side GPUs. Multiple clients can connect to the server, giving different users and processes access to large graph data that could not otherwise be handled with the client's own resources.
## <div align="center"><img src="img/cugraph_service_pict.png" width="400px"/></div>
-----
### Quick start
1. Install the cugraph-service conda packages (installing the server package also installs the client):
```
conda install -c rapidsai-nightly -c rapidsai -c conda-forge -c nvidia cugraph-service-server
```
1. Run the server (use --help to see more options)
- To run on a single-GPU:
```
cugraph-service-server
```
- To run on multiple GPUs:
```
cugraph-service-server --start-local-cuda-cluster
```
1. Use the client in your application:
```
>>> from cugraph_service_client import CugraphServiceClient
>>> client = CugraphServiceClient()
>>> # check connection to the server, uptime is in seconds
>>> client.uptime()
28
>>> # create a graph from a CSV on the server
>>> graph_id = client.create_graph()
>>> client.get_graph_ids()
[1]
>>> client.load_csv_as_edge_data("karate.csv", dtypes=["int32", "int32", "float32"], vertex_col_names=["src", "dst"], header=0, graph_id=graph_id)
>>> # check the graph info
>>> client.get_graph_info(graph_id=graph_id)
{'num_vertex_properties': 0, 'num_edge_properties': 1, 'is_multi_gpu': 0, 'num_edges': 156, 'num_vertices_from_vertex_data': 0, 'num_vertices': 34}
>>> # run an algo
>>> client.uniform_neighbor_sample(start_list=[0,12], fanout_vals=[2], graph_id=graph_id)
UniformNeighborSampleResult(sources=[0, 0, 12, 12], destinations=[1, 21, 0, 3], indices=[1.0, 1.0, 1.0, 1.0])
>>> # cleanup the graph on the server
>>> client.delete_graph(graph_id)
>>> client.get_graph_ids()
[]
```
### Debugging
#### UCX-Py related variables:
`UCX_TLS` - set the transports to use, in priority order. Example:
```
UCX_TLS=tcp,cuda_copy,cuda_ipc
```
`UCX_TCP_CM_REUSEADDR` - reuse addresses. This can be used to avoid "resource in use" errors during starting/restarting the service repeatedly.
```
UCX_TCP_CM_REUSEADDR=y
```
`UCX_LOG_LEVEL` - set the level for which UCX will output messages to the console. The example below will only output "ERROR" or higher. Set to "DEBUG" to see debug and higher messages.
```
UCX_LOG_LEVEL=ERROR
```
#### UCX performance checks:
Because cugraph-service uses UCX-Py for direct-to-client GPU data transfers when specified, it can be helpful to understand the various UCX performance checks available to ensure cugraph-service is transferring results as efficiently as the system is capable of.
```
ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 &
ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 localhost
```
```
ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 &
ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 16000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 100 -s 1000000000 localhost
```
```
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 1000000 -s 1000000000 &
CUDA_VISIBLE_DEVICES=0,1 ucx_perftest -m cuda -t tag_bw -n 1000000 -s 1000000000 localhost
```
### Building from source
Build and install the client first, then the server. This is necessary because the server depends on shared modules provided by the client.
```
$> cd cugraph_repo/python/cugraph_service/client
$> python setup.py install
$> cd ../server
$> python setup.py install
```
------
## <div align="left"><img src="img/rapids_logo.png" width="265px"/></div> Open GPU Data Science
The RAPIDS suite of open source software libraries aims to enable execution of end-to-end data science and analytics pipelines entirely on GPUs. It relies on NVIDIA® CUDA® primitives for low-level compute optimization but exposing that GPU parallelism and high-bandwidth memory speed through user-friendly Python interfaces.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/client/setup.py | # Copyright (c) 2018-2023, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup

# Discover only the cugraph_service_client package (and its subpackages);
# anything else alongside this setup.py is not part of the distribution.
packages = find_packages(include=["cugraph_service_client*"])

setup(
    # Ship the VERSION file with every discovered package so the installed
    # package can report its own version at runtime.
    package_data={key: ["VERSION"] for key in packages},
)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service | rapidsai_public_repos/cugraph/python/cugraph-service/client/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 NVIDIA CORPORATION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/remote_graph.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import importlib
from cugraph_service_client.exceptions import CugraphServiceError
from cugraph_service_client.remote_graph_utils import (
_transform_to_backend_dtype,
_transform_to_backend_dtype_1d,
_offsets_to_backend_dtype,
MissingModule,
)
try:
cudf = importlib.import_module("cudf")
except ModuleNotFoundError:
cudf = MissingModule("cudf")
try:
cupy = importlib.import_module("cupy")
except ModuleNotFoundError:
cupy = MissingModule("cupy")
try:
pandas = importlib.import_module("pandas")
except ModuleNotFoundError:
pandas = MissingModule("pandas")
try:
torch = importlib.import_module("torch")
except ModuleNotFoundError:
torch = MissingModule("torch")
cudf_installed = not isinstance(cudf, MissingModule)
class RemoteGraph:
    """
    Client-side proxy for a (property) graph residing on a cugraph-service
    server. All data access is delegated to the server through the
    cugraph-service client supplied at construction time; results are
    converted to the caller-requested backend (numpy, pandas, cupy, cudf,
    or torch) by the remote_graph_utils transform helpers.
    """

    # Column name constants used in the DataFrames/arrays returned by the
    # edges()/get_vertices()/get_*_data() accessors.
    vertex_col_name = "_VERTEX_"
    src_col_name = "_SRC_"
    dst_col_name = "_DST_"
    type_col_name = "_TYPE_"
    edge_id_col_name = "_EDGE_ID_"
    weight_col_name = "_WEIGHT_"
    _default_type_name = ""

    def __init__(
        self,
        cgs_client,
        cgs_graph_id,
    ):
        """
        Parameters
        ----------
        cgs_client : cugraph_service_client client instance
            Client used to communicate with the cugraph-service server.
        cgs_graph_id : int
            Server-side ID of the graph this object proxies.
        """
        self.__client = cgs_client
        self.__graph_id = cgs_graph_id
        # Lazily-built {type_name: int_code} maps; see the
        # _vertex_categorical_dtype / _edge_categorical_dtype properties.
        self.__vertex_categorical_dtype = None
        self.__edge_categorical_dtype = None

    def __del__(self):
        # Assume if a connection cannot be opened that the service is already
        # stopped and the delete call can be skipped.
        try:
            self.__client.open()
        except CugraphServiceError:
            return
        self.__client.delete_graph(self.__graph_id)

    def is_remote(self):
        """Always True: the graph data lives on a remote server."""
        return True

    def is_multi_gpu(self):
        """True if the server-side graph is distributed across multiple GPUs."""
        return self.graph_info["is_multi_gpu"]

    def is_bipartite(self):
        """Bipartite graphs are not supported remotely; always False."""
        return False

    def is_multipartite(self):
        """Multipartite graphs are not supported remotely; always False."""
        return False

    def is_directed(self):
        # NOTE(review): hard-coded — remote property graphs appear to always
        # be treated as directed, weighted multigraphs; confirm against the
        # server implementation.
        return True

    def is_multigraph(self):
        return True

    def is_weighted(self):
        return True

    def has_isolated_vertices(self):
        raise NotImplementedError("not implemented")

    def to_directed(self):
        raise NotImplementedError("not implemented")

    def to_undirected(self):
        raise NotImplementedError("not implemented")

    @property
    def _vertex_categorical_dtype(self):
        """Map of vertex type name -> integer category code, built lazily.

        NOTE: codes are assigned by enumerating a set, so the numbering is
        not guaranteed stable across processes — it is only consistent
        within this RemoteGraph instance.
        """
        if self.__vertex_categorical_dtype is None:
            cats = self.vertex_types
            self.__vertex_categorical_dtype = {cat: i for i, cat in enumerate(cats)}
        return self.__vertex_categorical_dtype

    @property
    def _edge_categorical_dtype(self):
        """Map of edge type name -> integer category code, built lazily.

        See the ordering caveat on _vertex_categorical_dtype.
        """
        if self.__edge_categorical_dtype is None:
            cats = self.edge_types
            self.__edge_categorical_dtype = {cat: i for i, cat in enumerate(cats)}
        return self.__edge_categorical_dtype

    @property
    def graph_info(self):
        """Dict of metadata about the remote graph (fetched per access)."""
        return self.__client.get_graph_info(graph_id=self.__graph_id)

    @property
    def has_properties(self):
        """True if the graph has any vertex or edge properties."""
        # FIXED: the previous implementation returned True only when *both*
        # property counts were zero — i.e. the inverse of "has properties".
        # Fetching graph_info once also avoids a second server round trip.
        info = self.graph_info
        return (
            info["num_vertex_properties"] != 0
            or info["num_edge_properties"] != 0
        )

    @property
    def _graph_id(self):
        """The server-side graph ID this instance proxies."""
        return self.__graph_id

    @property
    def _client(self):
        """The cugraph-service client used for all server communication."""
        return self.__client

    def edges(self, backend=("cudf" if cudf_installed else "numpy")):
        """
        Parameters
        ----------
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        Returns the edge list for this property graph as a dataframe,
        array, or tensor containing edge ids, source vertex,
        destination vertex, and edge type.
        """
        # default edge props include src, dst, edge ID, and edge type
        np_edges = self.__client.get_graph_edge_data(
            -1,
            graph_id=self.__graph_id,
        )

        # Convert edge type to numeric if necessary: array/tensor backends
        # cannot hold string categories, so map type names to int codes.
        if backend not in ["cudf", "pandas"]:
            edge_cat_types = self._edge_categorical_dtype
            np_edges[:, 3] = np.array([edge_cat_types[t] for t in np_edges[:, 3]])
            cat_dtype = "int32"
        else:
            cat_dtype_class = (
                cudf.CategoricalDtype if backend == "cudf" else pandas.CategoricalDtype
            )
            cat_dtype = cat_dtype_class(
                self._edge_categorical_dtype.keys(), ordered=True
            )

        return _transform_to_backend_dtype(
            np_edges,
            [
                self.edge_id_col_name,
                self.src_col_name,
                self.dst_col_name,
                self.type_col_name,
            ],
            backend,
            dtypes=["int64", "int64", "int64", cat_dtype],
        )

    @property
    def vertex_property_names(self):
        """
        Return a Python list of vertex property names.
        """
        return self.__client.get_graph_vertex_property_names(self.__graph_id)

    @property
    def edge_property_names(self):
        """
        Return a Python list of edge property names.
        """
        return self.__client.get_graph_edge_property_names(self.__graph_id)

    @property
    def vertex_types(self):
        """The set of vertex type names"""
        return set(self.__client.get_graph_vertex_types(self.__graph_id))

    @property
    def edge_types(self):
        """The set of edge type names"""
        return set(self.__client.get_graph_edge_types(self.__graph_id))

    def get_num_vertices(self, type=None, *, include_edge_data=True):
        """Return the number of all vertices or vertices of a given type.

        Parameters
        ----------
        type : string, optional
            If type is None (the default), return the total number of vertices,
            otherwise return the number of vertices of the specified type.
        include_edge_data : bool (default True)
            If True, include vertices that were added in vertex and edge data.
            If False, only include vertices that were added in vertex data.
            Note that vertices that only exist in edge data are assumed to have
            the default type.

        See Also
        --------
        RemotePropertyGraph.get_num_edges
        """
        return self.__client.get_num_vertices(type, include_edge_data, self.__graph_id)

    def number_of_vertices(self):
        """Alias for get_num_vertices() with default arguments."""
        return self.get_num_vertices(type=None, include_edge_data=True)

    def get_num_edges(self, type=None):
        """Return the number of all edges or edges of a given type.

        Parameters
        ----------
        type : string, optional
            If type is None (the default), return the total number of edges,
            otherwise return the number of edges of the specified type.

        See Also
        --------
        PropertyGraph.get_num_vertices
        """
        return self.__client.get_num_edges(type, self.__graph_id)

    def number_of_edges(self):
        """Alias for get_num_edges() over all edge types."""
        return self.get_num_edges(type=None)

    def get_vertices(
        self, selection=None, backend=("cudf" if cudf_installed else "numpy")
    ):
        """
        Parameters
        ----------
        selection : PropertySelection, optional
            A PropertySelection returned from one or more calls to
            select_vertices() and/or select_edges()
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        Return a Series containing the unique vertex IDs contained in both
        the vertex and edge property data.
        """
        # FIXED: the backend default previously was hard-coded to "cudf",
        # inconsistent with every other accessor which falls back to numpy
        # when cudf is not installed.
        if selection is not None:
            raise NotImplementedError(
                "Use of get_vertices() with selection"
                " not available for remote property graph."
            )

        # Column 0 of the returned vertex data holds the vertex IDs.
        vdata = self.__client.get_graph_vertex_data(graph_id=self.__graph_id)[:, 0]
        return _transform_to_backend_dtype_1d(
            vdata, backend=backend, dtype="int64", series_name=self.vertex_col_name
        )

    def vertices_ids(self):
        """
        Alias for get_vertices()
        """
        return self.get_vertices()

    def add_vertex_data(
        self, dataframe, vertex_col_name, type_name=None, property_columns=None
    ):
        """
        Add a dataframe describing vertex properties to the PropertyGraph.

        Parameters
        ----------
        dataframe : DataFrame-compatible instance
            A DataFrame instance with a compatible Pandas-like DataFrame
            interface.
        vertex_col_name : string
            The column name that contains the values to be used as vertex IDs.
        type_name : string
            The name to be assigned to the type of property being added. For
            example, if dataframe contains data about users, type_name might be
            "users". If not specified, the type of properties will be added as
            the empty string, "".
        property_columns : list of strings
            List of column names in dataframe to be added as properties. All
            other columns in dataframe will be ignored. If not specified, all
            columns in dataframe are added.

        Returns
        -------
        None
        """
        raise NotImplementedError("not implemented")

    def get_vertex_data(
        self,
        vertex_ids=None,
        types=None,
        columns=None,
        backend=("cudf" if cudf_installed else "numpy"),
    ):
        """
        Gets a DataFrame containing vertex properties

        Parameters
        ----------
        vertex_ids : one or a collection of integers, optional
            single, list, slice, pandas array, or series of integers which
            are the vertices to include in the returned dataframe
        types : str or collection of str, optional
            types of the vertices to include in the returned data.
            Default is to return all vertex types.
        columns : str or list of str, optional
            property or properties to include in returned data.
            Default includes all properties.
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        DataFrame
            containing vertex properties for only the specified
            vertex_ids, columns, and/or types, or all vertex IDs if not specified.
        """
        # FIXME expose na handling
        if columns is None:
            columns = self.vertex_property_names
        if vertex_ids is None:
            # -1 is the service-side sentinel for "all vertices".
            vertex_ids = -1

        vertex_data = self.__client.get_graph_vertex_data(
            id_or_ids=vertex_ids,
            property_keys=columns,
            types=types,
            graph_id=self.__graph_id,
        )

        # Convert type to numeric if necessary (column 1 holds the type);
        # array/tensor backends cannot hold string categories.
        if backend not in ["cudf", "pandas"]:
            vertex_cat_types = self._vertex_categorical_dtype
            vertex_data[:, 1] = np.array(
                [vertex_cat_types[t] for t in vertex_data[:, 1]]
            )
            cat_dtype = "int32"
        else:
            cat_dtype_class = (
                cudf.CategoricalDtype if backend == "cudf" else pandas.CategoricalDtype
            )
            cat_dtype = cat_dtype_class(
                self._vertex_categorical_dtype.keys(), ordered=True
            )

        # The server always returns the id/type columns first; drop them from
        # the requested property list to avoid duplicate column names.
        columns = set(columns)
        if self.type_col_name in columns:
            columns.remove(self.type_col_name)
        if self.vertex_col_name in columns:
            columns.remove(self.vertex_col_name)
        column_names = [self.vertex_col_name, self.type_col_name] + list(columns)

        return _transform_to_backend_dtype(
            vertex_data,
            column_names,
            backend,
            dtypes={self.type_col_name: cat_dtype},
        )

    def add_edge_data(
        self,
        dataframe,
        vertex_col_names,
        edge_id_col_name=None,
        type_name=None,
        property_columns=None,
    ):
        """
        Add a dataframe describing edge properties to the PropertyGraph.

        Parameters
        ----------
        dataframe : DataFrame-compatible instance
            A DataFrame instance with a compatible Pandas-like DataFrame
            interface.
        vertex_col_names : list of strings
            The column names that contain the values to be used as the source
            and destination vertex IDs for the edges.
        edge_id_col_name : string, optional
            The column name that contains the values to be used as edge IDs.
            If unspecified, edge IDs will be automatically assigned.
            Currently, all edge data must be added with the same method: either
            with automatically generated IDs, or from user-provided edge IDs.
        type_name : string
            The name to be assigned to the type of property being added. For
            example, if dataframe contains data about transactions, type_name
            might be "transactions". If not specified, the type of properties
            will be added as the empty string "".
        property_columns : list of strings
            List of column names in dataframe to be added as properties. All
            other columns in dataframe will be ignored. If not specified, all
            columns in dataframe are added.

        Returns
        -------
        None
        """
        raise NotImplementedError("not implemented")

    def get_edge_data(
        self,
        edge_ids=None,
        types=None,
        columns=None,
        backend=("cudf" if cudf_installed else "numpy"),
    ):
        """
        Return a dataframe containing edge properties for only the specified
        edge_ids, columns, and/or edge type, or all edge IDs if not specified.

        Parameters
        ----------
        edge_ids : int or collection of int, optional
            The list of edges to include in the edge data
        types : list, optional
            List of edge types to include in returned dataframe.
            None is the default and will return all edge types.
        columns : which edge columns will be returned, optional
            None is the default and will result in all columns being returned
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        Dataframe
            Containing edge IDs, type, edge source, destination,
            and all the columns specified in the columns parameter
        """
        # FIXME expose na handling
        base_columns = [
            self.edge_id_col_name,
            self.src_col_name,
            self.dst_col_name,
            self.type_col_name,
        ]

        if columns is None:
            columns = self.edge_property_names
        if edge_ids is None:
            # -1 is the service-side sentinel for "all edges".
            ids = -1
        else:
            ids = edge_ids

        edge_data = self.__client.get_graph_edge_data(
            id_or_ids=ids,
            property_keys=columns,
            types=types,
            graph_id=self.__graph_id,
        )

        # Convert edge type to numeric if necessary (column 3 holds the type);
        # array/tensor backends cannot hold string categories.
        if backend not in ["cudf", "pandas"]:
            edge_cat_types = self._edge_categorical_dtype
            edge_data[:, 3] = np.array([edge_cat_types[t] for t in edge_data[:, 3]])
            cat_dtype = "int32"
        else:
            cat_dtype_class = (
                cudf.CategoricalDtype if backend == "cudf" else pandas.CategoricalDtype
            )
            cat_dtype = cat_dtype_class(
                self._edge_categorical_dtype.keys(), ordered=True
            )

        # The server always returns the base columns first; drop them from the
        # requested property list to avoid duplicate column names.
        columns = set(columns)
        for c in base_columns:
            if c in columns:
                columns.remove(c)
        column_names = base_columns + list(columns)

        return _transform_to_backend_dtype(
            edge_data,
            column_names,
            backend,
            dtypes={self.type_col_name: cat_dtype},
        )

    def select_vertices(self, expr, from_previous_selection=None):
        """
        Evaluate expr and return a PropertySelection object representing the
        vertices that match the expression.

        Parameters
        ----------
        expr : string
            A python expression using property names and operators to select
            specific vertices.
        from_previous_selection : PropertySelection
            A PropertySelection instance returned from a prior call to
            select_vertices() that can be used to select a subset of vertices
            to evaluate the expression against. This allows for a selection of
            the intersection of vertices of multiple types (eg. all vertices
            that are both type A and type B)

        Returns
        -------
        PropertySelection instance to be used for calls to extract_subgraph()
        in order to construct a Graph containing only specific vertices.
        """
        raise NotImplementedError("not implemented")

    def select_edges(self, expr):
        """
        Evaluate expr and return a PropertySelection object representing the
        edges that match the expression.

        Parameters
        ----------
        expr : string
            A python expression using property names and operators to select
            specific edges.

        Returns
        -------
        PropertySelection instance to be used for calls to extract_subgraph()
        in order to construct a Graph containing only specific edges.
        """
        raise NotImplementedError("not implemented")

    def extract_subgraph(
        self,
        create_using=None,
        selection=None,
        edge_weight_property=None,
        default_edge_weight=None,
        check_multi_edges=True,
        renumber_graph=True,
        add_edge_data=True,
    ):
        """
        Return a subgraph of the overall PropertyGraph containing vertices
        and edges that match a selection.

        Parameters
        ----------
        create_using : cugraph Graph type or instance, optional
            Creates a Graph to return using the type specified. If an instance
            is specified, the type of the instance is used to construct the
            return Graph, and all relevant attributes set on the instance are
            copied to the return Graph (eg. directed). If not specified the
            returned Graph will be a directed cugraph.MultiGraph instance.
        selection : PropertySelection
            A PropertySelection returned from one or more calls to
            select_vertices() and/or select_edges(), used for creating a Graph
            with only the selected properties. If not specified the returned
            Graph will have all properties. Note, this could result in a Graph
            with multiple edges, which may not be supported based on the value
            of create_using.
        edge_weight_property : string
            The name of the property whose values will be used as weights on
            the returned Graph. If not specified, the returned Graph will be
            unweighted.
        check_multi_edges : bool (default is True)
            When True and create_using argument is given and not a MultiGraph,
            this will perform an expensive check to verify that the edges in
            the edge dataframe do not form a multigraph with duplicate edges.
        renumber_graph : bool (default is True)
            If True, return a Graph that has been renumbered for use by graph
            algorithms. If False, the returned graph will need to be manually
            renumbered prior to calling graph algos.
        add_edge_data : bool (default is True)
            If True, add meta data about the edges contained in the extracted
            graph which are required for future calls to annotate_dataframe().

        Returns
        -------
        A Graph instance of the same type as create_using containing only the
        vertices and edges resulting from applying the selection to the set of
        vertex and edge property data.
        """
        # The extraction happens server-side; wrap the resulting graph ID in
        # a new RemoteGraph proxy sharing this instance's client.
        sg_graph_id = self.__client.extract_subgraph(
            create_using=create_using,
            selection=selection,
            edge_weight_property=edge_weight_property,
            check_multi_edges=check_multi_edges,
            renumber_graph=renumber_graph,
            add_edge_data=add_edge_data,
            default_edge_weight=default_edge_weight,
            graph_id=self.__graph_id,
        )

        return RemoteGraph(self.__client, sg_graph_id)

    def annotate_dataframe(self, df, G, edge_vertex_col_names):
        """
        Add properties to df that represent the vertices and edges in graph G.

        Parameters
        ----------
        df : cudf.DataFrame or pandas.DataFrame
            A DataFrame containing edges identified by edge_vertex_col_names
            which will have properties for those edges added to it.
        G : cugraph.Graph (or subclass of) instance.
            Graph containing the edges specified in df. The Graph instance must
            have been generated from a prior call to extract_subgraph() in
            order to have the edge meta-data used to look up the correct
            properties.
        edge_vertex_col_names : tuple of strings
            The column names in df that represent the source and destination
            vertices, used for identifying edges.

        Returns
        -------
        A copy of df with additional columns corresponding to properties for
        the edge in the row.
        FIXME: also provide the ability to annotate vertex data.
        """
        raise NotImplementedError("not implemented")

    def renumber_vertices_by_type(
        self, prev_id_column=None, backend="cudf" if cudf_installed else "numpy"
    ):
        """Renumber vertex IDs to be contiguous by type.

        Returns a DataFrame with the start and stop IDs for each vertex type.
        Stop is *inclusive*.

        Parameters
        ----------
        prev_id_column : str, optional
            Column name to save the vertex ID before renumbering.
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        A DataFrame or dict (depending on backend) with the start and stop
        IDs for each vertex type. Stop is *inclusive*.
        """
        offsets = self.__client.renumber_vertices_by_type(
            prev_id_column=prev_id_column, graph_id=self.__graph_id
        )
        return _offsets_to_backend_dtype(offsets, backend)

    def renumber_edges_by_type(
        self, prev_id_column=None, backend="cudf" if cudf_installed else "numpy"
    ):
        """Renumber edge IDs to be contiguous by type.

        Returns a DataFrame with the start and stop IDs for each edge type.
        Stop is *inclusive*.

        Parameters
        ----------
        prev_id_column : str, optional
            Column name to save the edge ID before renumbering.
        backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
            Defaults to cudf if available, otherwise falls back to numpy.

        Returns
        -------
        A DataFrame or dict (depending on backend) with the start and stop
        IDs for each edge type. Stop is *inclusive*.
        """
        offsets = self.__client.renumber_edges_by_type(
            prev_id_column=prev_id_column, graph_id=self.__graph_id
        )
        return _offsets_to_backend_dtype(offsets, backend)
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/types.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Optional modules: additional features are enabled if these are present
try:
import numpy
except ModuleNotFoundError:
numpy = None
try:
import cupy
except ModuleNotFoundError:
cupy = None
from cugraph_service_client.cugraph_service_thrift import spec
Value = spec.Value
GraphVertexEdgeID = spec.GraphVertexEdgeID
BatchedEgoGraphsResult = spec.BatchedEgoGraphsResult
Node2vecResult = spec.Node2vecResult
UniformNeighborSampleResult = spec.UniformNeighborSampleResult
Offsets = spec.Offsets
class UnionWrapper:
    """
    Base class that bridges plain Python objects and Thrift "unions".

    Subclasses (e.g. ValueWrapper) wrap a Thrift union instance and
    duck-type it, so callers can move between Python values and Thrift
    union fields in either direction.
    """

    # Attribute names belonging to the Thrift machinery itself; these must
    # not be treated as union value fields by subclasses.
    non_attrs = {"default_spec", "thrift_spec", "read", "write"}
class ValueWrapper(UnionWrapper):
    """
    Provides an easy-to-use python object for abstracting Thrift "unions",
    allowing a python obj to be automatically mapped to the correct union
    field.
    """

    valid_types = ["int", "float", "str", "bool"]
    if numpy:
        valid_types += ["numpy.int8", "numpy.int32", "numpy.int64", "numpy.ndarray"]
    if cupy:
        valid_types += ["cupy.int8", "cupy.int32", "cupy.int64", "cupy.ndarray"]

    def __init__(self, val, val_name="value"):
        """
        Construct with a value supported by the Value "union". See
        cugraph_service_thrift.py

        val_name is used for better error messages only, and can be passed for
        including in the exception thrown if an invalid type is passed here.
        """
        if isinstance(val, Value):
            self.union = val
        # Check bool before int: bool is a subclass of int in Python, so the
        # int branch below would otherwise capture True/False and store them
        # in int32_value instead of the union's bool field.
        elif isinstance(val, bool):
            self.union = Value(bool_value=val)
        elif isinstance(val, int):
            # Thrift's i32 is signed, so only values in the signed 32-bit
            # range may use int32_value; everything else (including values in
            # [2**31, 2**32) and large negatives) must use int64_value.
            if -2147483648 <= val < 2147483648:
                self.union = Value(int32_value=val)
            else:
                self.union = Value(int64_value=val)
        elif isinstance(val, float):
            self.union = Value(double_value=val)
        elif (numpy and isinstance(val, (numpy.int8, numpy.int32))) or (
            cupy and isinstance(val, (cupy.int8, cupy.int32))
        ):
            self.union = Value(int32_value=int(val))
        elif (numpy and isinstance(val, numpy.int64)) or (
            cupy and isinstance(val, cupy.int64)
        ):
            self.union = Value(int64_value=int(val))
        elif (
            (numpy and isinstance(val, numpy.float32))
            or (cupy and isinstance(val, cupy.float32))
            or (numpy and isinstance(val, numpy.float64))
            or (cupy and isinstance(val, cupy.float64))
        ):
            self.union = Value(double_value=float(val))
        elif isinstance(val, str):
            self.union = Value(string_value=val)
        elif isinstance(val, (list, tuple)):
            self.union = Value(list_value=[ValueWrapper(i) for i in val])
        # FIXME: Assume ndarrays contain values Thrift can accept! Otherwise,
        # check and possibly convert ndarray dtypes.
        elif (numpy and isinstance(val, numpy.ndarray)) or (
            cupy and isinstance(val, cupy.ndarray)
        ):
            self.union = Value(list_value=[ValueWrapper(i) for i in val.tolist()])
        elif val is None:
            self.union = Value()
        else:
            raise TypeError(
                f"{val_name} must be one of the "
                f"following types: {self.valid_types}, got "
                f"{type(val)}"
            )

    def __getattr__(self, attr):
        """
        Retrieve all other attrs from the underlying Value object. This will
        essentially duck-type this ValueWrapper instance and allow it to be
        returned to Thrift and treated as a Value.
        """
        return getattr(self.union, attr)

    def get_py_obj(self):
        """
        Get the python object set in the union.
        """
        # Skip private attrs and Thrift machinery (see UnionWrapper.non_attrs)
        # so only actual union fields remain.
        attrs = [
            a
            for a in dir(self.union)
            if not (a.startswith("_")) and a not in self.non_attrs
        ]
        # Much like a C union, only one field will be set. Return the first
        # non-None value encountered.
        for a in attrs:
            val = getattr(self.union, a)
            if val is not None:
                # Assume all lists are homogeneous. Check the first item to see
                # if it is a Value or ValueWrapper obj, and if so recurse.
                # FIXME: this might be slow, consider handling lists of numbers
                # differently
                if isinstance(val, list) and len(val) > 0:
                    if isinstance(val[0], Value):
                        return [ValueWrapper(i).get_py_obj() for i in val]
                    elif isinstance(val[0], ValueWrapper):
                        return [i.get_py_obj() for i in val]
                    else:
                        raise TypeError(
                            f"expected Value or ValueWrapper, got {type(val)}"
                        )
                else:
                    return val
        return None
class GraphVertexEdgeIDWrapper(UnionWrapper):
    """
    Maps a python int or list of ints to the appropriately-sized field of a
    GraphVertexEdgeID Thrift union (int32 vs. int64, scalar vs. list).
    """

    def __init__(self, val, val_name="id"):
        if isinstance(val, GraphVertexEdgeID):
            self.union = val
        elif isinstance(val, int):
            # Thrift's i32 is signed: anything outside the signed 32-bit
            # range must be stored as int64 (the previous `>= 2**32` test
            # mis-filed values in [2**31, 2**32) and large negatives).
            if -2147483648 <= val < 2147483648:
                self.union = GraphVertexEdgeID(int32_id=val)
            else:
                self.union = GraphVertexEdgeID(int64_id=val)
        elif isinstance(val, list):
            # Inspect all values (not just the first, which also crashed on
            # empty lists): a single out-of-range ID anywhere promotes the
            # whole list to int64. An empty list is stored as int32 ids.
            if val and (min(val) < -2147483648 or max(val) >= 2147483648):
                self.union = GraphVertexEdgeID(int64_ids=val)
            else:
                self.union = GraphVertexEdgeID(int32_ids=val)
        else:
            raise TypeError(
                f"{val_name} must be one of the "
                "following types: [int, list<int>], got "
                f"{type(val)}"
            )

    def get_py_obj(self):
        """
        Get the python object set in the union.
        """
        # Skip private attrs and Thrift machinery (see UnionWrapper.non_attrs)
        # so only actual union fields remain.
        attrs = [
            a
            for a in dir(self.union)
            if not (a.startswith("_")) and a not in self.non_attrs
        ]
        # Much like a C union, only one field will be set. Return the first
        # non-None value encountered.
        for a in attrs:
            val = getattr(self.union, a)
            if val is not None:
                return val
        return None
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/remote_graph_utils.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import numpy as np
class MissingModule:
    """
    Stand-in for an optional module that could not be imported.

    Instances of this class are returned by import_optional() when a module
    cannot be found. Any attribute access raises RuntimeError, so importing
    an optional dependency never fails outright — only the code paths that
    actually use the missing module are affected.
    """

    def __init__(self, mod_name):
        # Name of the module that failed to import, used in the error text.
        self.name = mod_name

    def __getattr__(self, attr):
        # Called only for attributes not set on the instance (i.e. anything
        # other than `name`), so every real use of the module fails loudly.
        raise RuntimeError(f"This feature requires the {self.name} " "package/module")
def import_optional(mod, default_mod_class=MissingModule):
    """
    Import the "optional" module ``mod`` and return it.

    On success the real module object is returned. If the import raises
    ModuleNotFoundError, an instance of ``default_mod_class`` (constructed
    with ``mod_name=mod``) is returned instead. This supports "optional"
    dependencies: code can always import them, and only the code paths that
    actually use a missing module fail.

    Example
    -------
    >> from cugraph.utils import import_optional
    >> nx = import_optional("networkx")  # networkx is not installed
    >> G = nx.Graph()
    Traceback (most recent call last):
      ...
    RuntimeError: This feature requires the networkx package/module

    Example
    -------
    A custom ``default_mod_class`` can provide a fallback instead of raising,
    e.g. a class whose ``__getattr__`` delegates to pandas when cudf is not
    installed:

    >> df_mod = import_optional("cudf", default_mod_class=CuDFFallback)
    >> type(df_mod.DataFrame())
    <class 'pandas.core.frame.DataFrame'>
    """
    try:
        module = importlib.import_module(mod)
    except ModuleNotFoundError:
        module = default_mod_class(mod_name=mod)
    return module
# Resolve the optional heavyweight dependencies once at import time. Each
# name is either the real module or a MissingModule placeholder that raises
# RuntimeError on first attribute access.
cudf = import_optional("cudf")
cupy = import_optional("cupy")
pandas = import_optional("pandas")
torch = import_optional("torch")
def _transform_to_backend_dtype_1d(data, series_name=None, backend="numpy", dtype=None):
"""
Supports method-by-method selection of backend type (cupy, cudf, etc.)
to avoid costly conversion such as row-major to column-major transformation.
This method is used for 1-dimensional data, and does not perform unncessary
transpositions or copies.
Note: If using inferred dtypes, the returned series, array, or tensor may
infer a different dtype than what was originally on the server (i.e promotion
of int32 to int64). In the future, the server may also return dtype to prevent
this from occurring.
data : np.ndarray
The raw ndarray that will be transformed to the backend dtype.
series_name : string
The name of the series (only used for dataframe backends).
backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
[default = 'numpy']
dtype : ('int32', 'int64', 'float32', etc.)
Optional. The data type to use when storing data in a series or array.
If not set, it will be inferred for dataframe backends, and assumed as float64
for array and tensor backends.
"""
if backend == "numpy":
if dtype == data.dtype:
return data
else:
return np.array(data, dtype=dtype or "float64")
elif backend == "cupy":
return cupy.array(data, dtype=dtype or "float64")
elif backend == "pandas":
return pandas.Series(data, name=series_name, dtype=dtype)
elif backend == "cudf":
return cudf.Series(data, name=series_name, dtype=dtype)
elif backend == "torch":
return torch.tensor(data.astype(dtype=dtype or "float64"))
backend = backend.split(":")
if backend[0] == "torch":
try:
device = int(backend[1])
except ValueError:
device = backend[1]
return torch.tensor(data.astype(dtype=dtype or "float64"), device=device)
raise ValueError(f"invalid backend {backend[0]}")
def _transform_to_backend_dtype(data, column_names, backend="numpy", dtypes=None):
"""
Supports method-by-method selection of backend type (cupy, cudf, etc.)
to avoid costly conversion such as row-major to column-major transformation.
If using an array or tensor backend, this method will likely be followed with
one or more stack() operations to create a matrix or matrices.
Note: If using inferred dtypes, the returned dataframes, arrays, or tensors may
infer a different dtype than what was originally on the server (i.e promotion
of int32 to int64). In the future, the server may also return dtype to prevent
this from occurring.
data : numpy.ndarray
The raw ndarray that will be transformed to the backend type.
column_names : list[string]
The names of the columns, if creating a dataframe.
backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
[default = 'numpy']
The data backend to convert the provided data to.
dtypes : ('int32', 'int64', 'float32', etc.)
Optional. The data type to use when storing data in a dataframe or array.
If not set, it will be inferred for dataframe backends, and assumed as float64
for array and tensor backends.
May be a list, or dictionary corresponding to column names. Unspecified
columns in the dictionary will have their dtype inferred. Note: for array
and tensor backends, the inferred type is always 'float64' which will result
in a error for non-numeric inputs.
i.e. ['int32', 'int64', 'int32', 'float64']
i.e. {'col1':'int32', 'col2': 'int64', 'col3': 'float64'}
"""
default_dtype = None if backend in ["cudf", "pandas"] else "float64"
if dtypes is None:
dtypes = [default_dtype] * data.shape[1]
elif isinstance(dtypes, (list, tuple)):
if len(dtypes) != data.shape[1]:
raise ValueError("Datatype array length must match number of columns!")
elif isinstance(dtypes, dict):
dtypes = [
dtypes[name] if name in dtypes else default_dtype for name in column_names
]
else:
raise ValueError("dtypes must be None, a list/tuple, or a dict")
if not isinstance(data, np.ndarray):
raise TypeError("Numpy ndarray expected")
if backend == "cupy":
return [cupy.array(data[:, c], dtype=dtypes[c]) for c in range(data.shape[1])]
elif backend == "numpy":
return [np.array(data[:, c], dtype=dtypes[c]) for c in range(data.shape[1])]
elif backend == "pandas" or backend == "cudf":
from_records = (
pandas.DataFrame.from_records
if backend == "pandas"
else cudf.DataFrame.from_records
)
df = from_records(data, columns=column_names)
for i, t in enumerate(dtypes):
if t is not None:
df[column_names[i]] = df[column_names[i]].astype(t)
return df
elif backend == "torch":
return [
torch.tensor(data[:, c].astype(dtypes[c])) for c in range(data.shape[1])
]
backend = backend.split(":")
if backend[0] == "torch":
try:
device = int(backend[1])
except ValueError:
device = backend[1]
return [
torch.tensor(data[:, c].astype(dtypes[c]), device=device)
for c in range(data.shape[1])
]
raise ValueError(f"invalid backend {backend[0]}")
def _offsets_to_backend_dtype(offsets, backend):
"""
Transforms the offsets object into an appropriate object for the given backend.
Parameters
----------
offsets : cugraph_service_client.types.Offsets
The offsets object to transform.
backend : ('numpy', 'pandas', 'cupy', 'cudf', 'torch', 'torch:<device>')
[default = 'numpy']
The backend the offsets will be transformed to a type of.
Returns
-------
An object of the desired backend.
For cudf: A cudf DataFrame with index=type, start, stop columns
For pandas: A pandas DataFrame with index=type, start, stop columns
For cupy: A dict of {'type': np.ndarray, 'start': cp.ndarray, 'stop': cp.ndarray}
For numpy: A dict of {'type': np.ndarray, 'start': np.ndarray, 'stop': np.ndarray}
For torch: A dict of {'type': np.ndarray, 'start': Tensor, 'stop': Tensor}
"""
if backend == "cudf" or backend == "pandas":
df_clx = cudf.DataFrame if backend == "cudf" else pandas.DataFrame
df = df_clx(
{
"start": offsets.start,
"stop": offsets.stop,
},
index=offsets.type,
)
df.index.name = "type"
return df
if backend == "cupy":
tn_clx = cupy.array
device = None
elif backend == "numpy":
tn_clx = np.array
device = None
elif backend == "torch":
tn_clx = torch.tensor
device = "cpu"
else:
if "torch" not in backend:
raise ValueError(f"Invalid backend {backend}")
tn_clx = torch.tensor
backend = backend.split(":")
try:
device = int(backend[1])
except ValueError:
device = backend[1]
return_dict = {
"type": np.array(offsets.type),
"start": tn_clx(offsets.start),
"stop": tn_clx(offsets.stop),
}
if device is not None:
return_dict["start"] = return_dict["start"].to(device)
return_dict["stop"] = return_dict["stop"].to(device)
return return_dict
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/exceptions.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph_service_client.cugraph_service_thrift import spec

# FIXME: add more fine-grained exceptions!
# Alias the Thrift-generated exception so callers can catch
# CugraphServiceError without importing any thriftpy2/spec details.
CugraphServiceError = spec.CugraphServiceError
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/cugraph_service_thrift.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import thriftpy2
from thriftpy2.rpc import make_client
from thriftpy2.protocol import TBinaryProtocolFactory
from thriftpy2.server import TSimpleServer
from thriftpy2.thrift import TProcessor
from thriftpy2.transport import (
TBufferedTransportFactory,
TServerSocket,
TTransportException,
)
# This is the Thrift input file as a string rather than a separate file. This
# allows the Thrift input to be contained within the module that's responsible
# for all Thrift-specific details rather than a separate .thrift file.
#
# thriftpy2 (https://github.com/Thriftpy/thriftpy2) is being used here instead
# of Apache Thrift since it offers an easier-to-use API exclusively for Python
# which is still compatible with servers/cleints using Apache Thrift (Apache
# Thrift can be used from a variety of different languages) while offering
# approximately the same performance.
#
# See the Apache Thrift tutorial for Python for examples:
# https://thrift.apache.org/tutorial/py.html
cugraph_thrift_spec = """
# FIXME: consider additional, more fine-grained exceptions
exception CugraphServiceError {
1:string message
}
struct BatchedEgoGraphsResult {
1:list<i32> src_verts
2:list<i32> dst_verts
3:list<double> edge_weights
4:list<i32> seeds_offsets
}
struct Node2vecResult {
1:list<i32> vertex_paths
2:list<double> edge_weights
3:list<i32> path_sizes
}
# FIXME: uniform_neighbor_sample may need to return indices as ints
# See: https://github.com/rapidsai/cugraph/issues/2654
struct UniformNeighborSampleResult {
1:list<i32> sources
2:list<i32> destinations
3:list<double> indices
}
union GraphVertexEdgeID {
1:i32 int32_id
2:i64 int64_id
3:list<i32> int32_ids
4:list<i64> int64_ids
}
union Value {
1:i32 int32_value
2:i64 int64_value
3:string string_value
4:bool bool_value
5:double double_value
6:list<Value> list_value
}
union Offsets {
1:list<string> type
2:list<i32> start
3:list<i32> stop
}
service CugraphService {
##############################################################################
# Environment management
i32 uptime()
map<string, Value> get_server_info() throws (1:CugraphServiceError e),
list<string> load_graph_creation_extensions(1:string extension_dir_path
) throws (1:CugraphServiceError e),
list<string> load_extensions(1:string extension_dir_path
) throws (1:CugraphServiceError e),
void unload_extension_module(1:string modname) throws (1:CugraphServiceError e),
i32 call_graph_creation_extension(1:string func_name,
2:string func_args_repr,
3:string func_kwargs_repr
) throws (1:CugraphServiceError e),
Value call_extension(1:string func_name,
2:string func_args_repr,
3:string func_kwargs_repr
4:string result_host,
5:i16 result_port
) throws (1:CugraphServiceError e),
##############################################################################
# Graph management
i32 create_graph() throws(1:CugraphServiceError e),
void delete_graph(1:i32 graph_id) throws (1:CugraphServiceError e),
list<i32> get_graph_ids() throws(1:CugraphServiceError e),
map<string, Value> get_graph_info(1:list<string> keys,
2:i32 graph_id
) throws(1:CugraphServiceError e),
void load_csv_as_vertex_data(1:string csv_file_name,
2:string delimiter,
3:list<string> dtypes,
4:i32 header,
5:string vertex_col_name,
6:string type_name,
7:list<string> property_columns,
8:i32 graph_id,
9:list<string> names
) throws (1:CugraphServiceError e),
void load_csv_as_edge_data(1:string csv_file_name,
2:string delimiter,
3:list<string> dtypes,
4:i32 header,
5:list<string> vertex_col_names,
6:string type_name,
7:list<string> property_columns,
8:i32 graph_id,
9:list<string> names,
10:string edge_id_col_name
) throws (1:CugraphServiceError e),
list<i32> get_edge_IDs_for_vertices(1:list<i32> src_vert_IDs,
2:list<i32> dst_vert_IDs,
3:i32 graph_id
) throws (1:CugraphServiceError e),
Offsets
renumber_vertices_by_type(1:string prev_id_column,
2:i32 graph_id
) throws (1:CugraphServiceError e),
Offsets
renumber_edges_by_type(1:string prev_id_column,
2:i32 graph_id
) throws (1:CugraphServiceError e),
i32 extract_subgraph(1:string create_using,
2:string selection,
3:string edge_weight_property,
4:double default_edge_weight,
5:bool check_multi_edges,
6:bool renumber_graph,
7:bool add_edge_data,
8:i32 graph_id
) throws (1:CugraphServiceError e),
binary get_graph_vertex_data(1:GraphVertexEdgeID vertex_id,
2:Value null_replacement_value,
3:list<string> property_keys,
4:list<string> types,
5:i32 graph_id
) throws (1:CugraphServiceError e),
binary get_graph_edge_data(1:GraphVertexEdgeID edge_id,
2:Value null_replacement_value,
3:list<string> property_keys,
4:list<string> types,
5:i32 graph_id,
) throws (1:CugraphServiceError e),
bool is_vertex_property(1:string property_key,
2:i32 graph_id) throws (1:CugraphServiceError e),
bool is_edge_property(1:string property_key,
2:i32 graph_id) throws (1:CugraphServiceError e),
list<string> get_graph_vertex_property_names(1:i32 graph_id)
throws (1:CugraphServiceError e),
list<string> get_graph_edge_property_names(1:i32 graph_id)
throws (1:CugraphServiceError e),
list<string> get_graph_vertex_types(1:i32 graph_id)
throws (1:CugraphServiceError e),
list<string> get_graph_edge_types(1:i32 graph_id)
throws (1:CugraphServiceError e),
i64 get_num_vertices(1:string vertex_type,
2:bool include_edge_data,
3:i32 graph_id) throws (1:CugraphServiceError e),
i64 get_num_edges(1:string edge_type,
2:i32 graph_id) throws (1:CugraphServiceError e),
##############################################################################
# Algos
BatchedEgoGraphsResult
batched_ego_graphs(1:list<i32> seeds,
2:i32 radius,
3:i32 graph_id
) throws (1:CugraphServiceError e),
Node2vecResult
node2vec(1:list<i32> start_vertices,
2:i32 max_depth,
3:i32 graph_id
) throws (1:CugraphServiceError e),
UniformNeighborSampleResult
uniform_neighbor_sample(1:list<i32> start_list,
2:list<i32> fanout_vals,
3:bool with_replacement,
4:i32 graph_id,
5:string result_host,
6:i16 result_port
) throws (1:CugraphServiceError e),
##############################################################################
# Test/Debug
i32 create_test_array(1:i64 nbytes
) throws (1:CugraphServiceError e),
void delete_test_array(1:i32 test_array_id) throws (1:CugraphServiceError e),
list<byte> receive_test_array(1:i32 test_array_id
) throws (1:CugraphServiceError e),
oneway void receive_test_array_to_device(1:i32 test_array_id,
2:string result_host,
3:i16 result_port
) throws (1:CugraphServiceError e),
string get_graph_type(1:i32 graph_id) throws(1:CugraphServiceError e),
}
"""
# Load the cugraph Thrift specification on import. Syntax errors and other
# problems will be apparent immediately on import, and it allows any other
# module to import this and access the various types defined in the Thrift
# specification without being exposed to the thriftpy2 API.
# The resulting `spec` module exposes the generated types (CugraphService,
# Value, Offsets, CugraphServiceError, ...) used throughout this package.
spec = thriftpy2.load_fp(io.StringIO(cugraph_thrift_spec), module_name="cugraph_thrift")
def create_server(handler, host, port, client_timeout=90000):
    """
    Return a server object configured to listen on host/port and use the
    handler object to handle calls from clients. The handler object must have
    an interface compatible with the CugraphService service defined in the
    Thrift specification.

    Parameters
    ----------
    handler : object
        Implements the CugraphService API (see the Thrift spec above).
    host : string
        Hostname/IP the server socket binds to.
    port : int
        TCP port the server listens on.
    client_timeout : int, default 90000
        Socket timeout in milliseconds applied to client connections.

    Note: This function is defined here in order to allow it to have easy
    access to the Thrift spec loaded here on import, and to keep all thriftpy2
    calls in this module. However, this function is likely only called from the
    cugraph_service_server package which depends on the code in this package.
    """
    proto_factory = TBinaryProtocolFactory()
    trans_factory = TBufferedTransportFactory()
    # (removed a redundant `client_timeout = client_timeout` self-assignment)
    processor = TProcessor(spec.CugraphService, handler)
    server_socket = TServerSocket(host=host, port=port, client_timeout=client_timeout)
    server = TSimpleServer(
        processor,
        server_socket,
        iprot_factory=proto_factory,
        itrans_factory=trans_factory,
    )
    return server
def create_client(host, port, call_timeout=90000):
    """
    Return a client object that will make calls on a server listening on
    host/port.

    call_timeout (milliseconds, default 90000) bounds how long each server API
    call made through the returned client may take — if a call does not return
    within call_timeout milliseconds, an exception is raised.
    """
    try:
        client = make_client(
            spec.CugraphService, host=host, port=port, timeout=call_timeout
        )
    except TTransportException:
        # Raise a CugraphServiceError so all Thrift details stay encapsulated
        # in this module; otherwise callers would need to import thriftpy2
        # just to catch TTransportException.
        #
        # NOTE: CugraphServiceError is normally imported from
        # cugraph_service_client.exceptions, but since that class is actually
        # defined from the spec loaded in this module, use it directly from
        # spec here.
        #
        # FIXME: may need to have additional thrift exception handlers
        # FIXME: this exception being raised could use more detail
        raise spec.CugraphServiceError(
            "could not create a client session with a cugraph_service server"
        )
    return client
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/_version.py | # Copyright (c) 2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib.resources
# Read VERSION file from the module that is symlinked to VERSION file
# in the root of the repo at build time or copied to the moudle at
# installation. VERSION is a separate file that allows CI build-time scripts
# to update version info (including commit hashes) without modifying
# source files.
# Version string read from the packaged VERSION file (see the note above
# about how CI updates that file).
__version__ = (
    importlib.resources.files("cugraph_service_client")
    .joinpath("VERSION")
    .read_text()
    .strip()
)
# Placeholder; presumably overwritten with the actual commit hash by CI
# build scripts — confirm against the release tooling.
__git_commit__ = ""
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/__init__.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# constants used by both client and server
# (the server package depends on the client so server code can share client
# code/utilities/defaults/etc.)
# Dtype names an extension may return; the list order defines each name's
# integer code.
supported_extension_return_dtypes = [
    "NoneType",
    "int8",
    "int16",
    "int32",
    "int64",
    "float16",
    "float32",
    "float64",
]

# Bi-directional mapping between the dtype names above and their integer
# codes (map[i] -> name and map[name] -> i), used for sending dtype
# meta-data between client and server.
extension_return_dtype_map = {
    code: name for code, name in enumerate(supported_extension_return_dtypes)
}
extension_return_dtype_map.update(
    {name: code for code, name in enumerate(supported_extension_return_dtypes)}
)
from cugraph_service_client.client import CugraphServiceClient
from cugraph_service_client.remote_graph import RemoteGraph
from cugraph_service_client._version import __git_commit__, __version__
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/VERSION | 23.12.00
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/defaults.py | # Copyright (c) 2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default host and port a CugraphServiceClient connects to (and a
# cugraph-service server listens on).
host = "localhost"
port = 9090
# Default port for the separate results channel (see client.py's
# results_port usage) — presumably for direct result transfers; confirm
# against the server implementation.
results_port = 9091
# Default graph ID — NOTE(review): assumed to refer to the server's implicit
# default graph when callers pass no graph_id; confirm against the server.
graph_id = 0
| 0 |
rapidsai_public_repos/cugraph/python/cugraph-service/client | rapidsai_public_repos/cugraph/python/cugraph-service/client/cugraph_service_client/client.py | # Copyright (c) 2022-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cugraph_service_client.remote_graph_utils import import_optional, MissingModule
import numpy as np
from functools import wraps
from collections.abc import Sequence
import pickle
import ucp
import asyncio
import threading
from cugraph_service_client import defaults
from cugraph_service_client.remote_graph import RemoteGraph
from cugraph_service_client import extension_return_dtype_map
from cugraph_service_client.types import (
ValueWrapper,
GraphVertexEdgeID,
UniformNeighborSampleResult,
)
from cugraph_service_client.cugraph_service_thrift import create_client
# Resolve optional GPU/dataframe dependencies; each name is either the real
# module or a MissingModule placeholder that raises on attribute access.
cp = import_optional("cupy")
cudf = import_optional("cudf")
pandas = import_optional("pandas")
# Availability flags used to choose code paths that need the real packages.
cupy_installed = not isinstance(cp, MissingModule)
cudf_installed = not isinstance(cudf, MissingModule)
pandas_installed = not isinstance(pandas, MissingModule)
class RunAsyncioThread(threading.Thread):
    """
    Thread that starts a brand-new asyncio event loop and runs a single
    coroutine function in it, storing the outcome in ``self.result``.

    Used by run_async() when the calling thread already has a running event
    loop and therefore cannot call asyncio.run() directly.
    """

    def __init__(self, func, args, kwargs):
        """
        Parameters
        ----------
        func : function
            The coroutine function that will be run.
        args : tuple
            Positional arguments forwarded to ``func``.
        kwargs : dict
            Keyword arguments forwarded to ``func``.
        """
        super().__init__()
        self.func = func
        self.args = args
        self.kwargs = kwargs
        # Populated by run(); read by the caller after join().
        self.result = None

    def run(self):
        """
        Execute ``self.func`` inside a new event loop on this thread and
        record its return value in ``self.result``.
        """
        coro = self.func(*self.args, **self.kwargs)
        self.result = asyncio.run(coro)
def run_async(func, *args, **kwargs):
    """
    Run the coroutine function ``func`` to completion and return its result.

    If no event loop is running on the current thread, ``asyncio.run`` is
    used directly. If a loop is already running, the work is dispatched to a
    helper thread with its own fresh event loop (calling ``asyncio.run`` on a
    thread that already has a running loop is not allowed).

    Parameters
    ----------
    func : function
        The function that will be run.
    *args : args
        The arguments to the given function.
    **kwargs : kwargs
        The keyword arguments to the given function.

    Returns
    -------
    The output of the given function.
    """
    try:
        asyncio.get_running_loop()
        loop_is_running = True
    except RuntimeError:
        loop_is_running = False

    if not loop_is_running:
        return asyncio.run(func(*args, **kwargs))

    worker = RunAsyncioThread(func, args, kwargs)
    worker.start()
    worker.join()
    return worker.result
class DeviceArrayAllocator:
    """
    Callable that allocates uint8 cupy arrays on a fixed GPU device.

    Construct with a device number; each subsequent call allocates and
    returns an empty array of the requested number of bytes on that device.
    """

    def __init__(self, device):
        # GPU device ordinal on which every allocation takes place.
        self.device = device

    def __call__(self, nbytes):
        # Activate the target device only for the duration of the allocation.
        with cp.cuda.Device(self.device):
            return cp.empty(nbytes, dtype="uint8")
class CugraphServiceClient:
"""
Client object for cugraph_service, which defines the API that clients can
use to access the cugraph_service server.
"""
def __init__(
self, host=defaults.host, port=defaults.port, results_port=defaults.results_port
):
"""
Creates a connection to a cugraph_service server running on host/port.
Parameters
----------
host : string, defaults to 127.0.0.1
Hostname where the cugraph_service server is running
port : int, defaults to 9090
Port number where the cugraph_service server is listening
Returns
-------
CugraphServiceClient object
Examples
--------
>>> from cugraph_service_client import CugraphServiceClient
>>> client = CugraphServiceClient()
"""
self.host = host
self.port = port
self.results_port = results_port
self.__client = None
# If True, do not automatically close a server connection upon
# completion or error of a server API call. This requires the caller to
# manually call close() when done.
self.hold_open = False
    def __del__(self):
        # Release the server connection (if any) when the client object is
        # garbage collected, so other clients can connect.
        self.close()
def __server_connection(method):
    """
    Decorator for methods that need a live server connection: opens the
    connection before invoking the method and closes it afterwards, even
    if the method raises.

    When self.hold_open is True the automatic close() is skipped so that
    several server calls can reuse one connection; the caller is then
    responsible for calling close() to let other clients connect.
    """

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        self.open()
        try:
            # Returning from inside try still runs the finally block.
            return method(self, *args, **kwargs)
        finally:
            if not self.hold_open:
                self.close()

    return wrapper
def open(self, call_timeout=900000):
    """
    Opens a connection to the server at self.host/self.port if one is not
    already established. close() must be called in order to allow other
    connections from other clients to be made.

    This call does nothing if a connection to the server is already open.

    Note: all APIs that access the server will call this method
    automatically, followed automatically by a call to close(), so calling
    this method should not be necessary. close() is not automatically
    called if self.hold_open is True.

    Parameters
    ----------
    call_timeout : int (default is 900000)
        Time in milliseconds that calls to the server using this open
        connection must return by. Ignored when a connection is already
        open.

    Returns
    -------
    None

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> # Manually open a connection. The connection is held open and other
    >>> # clients cannot connect until a client API call completes or
    >>> # close() is manually called.
    >>> client.open()
    """
    # Only create the RPC client once; repeated calls reuse the existing
    # connection (and therefore also its original call_timeout).
    if self.__client is None:
        self.__client = create_client(
            self.host, self.port, call_timeout=call_timeout
        )
def close(self):
    """
    Close the connection to the server, if one is open, allowing other
    clients to connect. Called automatically for every server API call
    when self.hold_open is False.

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> # Hold the connection open across multiple API calls, then
    >>> # release it manually (normally not needed; shown for demo).
    >>> client.hold_open = True
    >>> client.node2vec([0,1], 2)
    >>> client.close()
    >>> client.hold_open = False
    """
    # Nothing to do when no connection was ever opened (or it was
    # already closed).
    if self.__client is None:
        return
    self.__client.close()
    self.__client = None
###########################################################################
# Environment management
@__server_connection
def uptime(self):
    """
    Return the server uptime in seconds. Often used as a "ping".

    Returns
    -------
    uptime : int
        Number of seconds the server has been running.

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> client.uptime()
    >>> 32
    """
    seconds = self.__client.uptime()
    return seconds
@__server_connection
def get_server_info(self):
    """
    Return a dictionary of information about the server.

    Returns
    -------
    server_info : dict
        Environment and state information about the server, with values
        converted to plain python objects.

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> client.get_server_info()
    >>> {'num_gpus': 2}
    """
    raw_info = self.__client.get_server_info()
    # The RPC layer hands back a dict of Value ("union") objects; unwrap
    # each entry into the corresponding simple python type.
    return {key: ValueWrapper(val).get_py_obj() for key, val in raw_info.items()}
@__server_connection
def load_graph_creation_extensions(self, extension_dir_path):
    """
    Load the graph-creation extensions found in the directory
    extension_dir_path.

    Parameters
    ----------
    extension_dir_path : string
        Path to the directory containing the extension files (.py source
        files). This directory must be readable by the server.

    Returns
    -------
    extension_modnames : list
        Names of the modules that were loaded; these can be passed to
        unload_extension_module().

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> extension_modnames = client.load_graph_creation_extensions(
    ...     "/some/server/side/directory")
    """
    modnames = self.__client.load_graph_creation_extensions(extension_dir_path)
    return modnames
@__server_connection
def load_extensions(self, extension_dir_or_mod_path):
    """
    Load the extensions present in a directory (path on disk), or in a
    module/package path (as used in an import statement), given by
    extension_dir_or_mod_path.

    Parameters
    ----------
    extension_dir_or_mod_path : string
        Path to the directory containing the extension files (.py source
        files), or an importable module or package path (eg. my.package
        or my.package.module). A directory must be readable by the
        server; a module/package path must be importable by the server
        (ie. present in the running server's sys.path).

    Returns
    -------
    extension_modnames : list
        Module names loaded, as paths to files on disk; these can be
        passed to unload_extension_module().

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> modnames = client.load_extensions("/some/server/side/directory")
    >>> more_modnames = client.load_extensions("my_project.extensions.etl")
    """
    modnames = self.__client.load_extensions(extension_dir_or_mod_path)
    return modnames
@__server_connection
def unload_extension_module(self, modname):
    """
    Remove all extensions contained in the module named modname.

    Parameters
    ----------
    modname : string
        Name of the module to be unloaded. All extension functions
        contained in modname will no longer be callable.

    Returns
    -------
    None

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> ext_mod_name = client.load_graph_creation_extensions(
    ...     "/some/server/side/directory")
    >>> client.unload_extension_module(ext_mod_name)
    """
    result = self.__client.unload_extension_module(modname)
    return result
@__server_connection
def call_graph_creation_extension(self, func_name, *func_args, **func_kwargs):
    """
    Call a graph creation extension on the server that was previously
    loaded by load_graph_creation_extensions(), and return the graph ID
    of the graph created by the extension.

    Parameters
    ----------
    func_name : string
        Name of the server-side extension function loaded by a prior
        call to load_graph_creation_extensions(). All graph creation
        extension functions are expected to return a new graph.
    *func_args : string, int, list, dictionary (optional)
        Positional args for func_name. These are serialized with repr()
        on the client and restored with eval() on the server, so only
        values that round-trip through repr()/eval() are supported.
    **func_kwargs : string, int, list, dictionary
        Keyword args for func_name, serialized the same way as
        func_args.

    Returns
    -------
    graph_id : int
        unique graph ID

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> # Load the extension file containing "my_complex_create_graph()"
    >>> client.load_graph_creation_extensions("/some/server/side/dir")
    >>> new_graph_id = client.call_graph_creation_extension(
    ...     "my_complex_create_graph",
    ...     "/path/to/csv/on/server/graph.csv",
    ...     clean_data=True)
    """
    # Serialize the args for transport; the server eval()s these strings.
    return self.__client.call_graph_creation_extension(
        func_name, repr(func_args), repr(func_kwargs)
    )
@__server_connection
def call_extension(
    self,
    func_name,
    *func_args,
    result_device=None,
    **func_kwargs,
):
    """
    Calls an extension on the server that was previously loaded by a prior
    call to load_extensions(), then returns the result returned by the
    extension.

    Parameters
    ----------
    func_name : string
        The name of the server-side extension function loaded by a prior
        call to load_extensions().
    *func_args : string, int, list, dictionary (optional)
        The positional args to pass to func_name. Note that func_args are
        converted to their string representation using repr() on the
        client, then restored to python objects on the server using eval(),
        and therefore only objects that can be restored server-side with
        eval() are supported.
    result_device : int, default is None
        If specified, must be the integer ID of a GPU device to have the
        server transfer results to as one or more cupy ndarrays.
    **func_kwargs : string, int, list, dictionary
        The keyword args to pass to func_name, converted for transport the
        same way as func_args (repr() on the client, eval() on the
        server).

    Returns
    -------
    result : python int, float, string, list
        The result returned by the extension, or cupy ndarray(s) on
        result_device when result_device is specified.

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> # Load the extension file containing "my_serverside_function()"
    >>> client.load_extensions("/some/server/side/dir")
    >>> result = client.call_extension(
    ...     "my_serverside_function", 33, 22, "some_string")
    """
    func_args_repr = repr(func_args)
    func_kwargs_repr = repr(func_kwargs)
    if result_device is not None:
        # Use run_async rather than asyncio.run directly: asyncio.run
        # raises if an event loop is already running in this thread, and
        # run_async falls back to a helper thread in that case. This is
        # consistent with uniform_neighbor_sample().
        result_obj = run_async(
            self.__call_extension_to_device,
            func_name,
            func_args_repr,
            func_kwargs_repr,
            result_device,
        )
        # result_obj is a cupy array or tuple of cupy arrays on result_device
        return result_obj
    else:
        result_obj = self.__client.call_extension(
            func_name,
            func_args_repr,
            func_kwargs_repr,
            client_host=None,
            client_result_port=None,
        )
        # Convert the structure returned from the RPC call to a python type
        # FIXME: ValueWrapper ctor and get_py_obj are recursive and could be slow,
        # especially if Value is a list. Consider returning the Value obj as-is.
        return ValueWrapper(result_obj).get_py_obj()
###########################################################################
# Graph management
@__server_connection
def create_graph(self):
    """
    Create a new graph on the server associated with a new (non-default)
    unique graph ID, and return that ID.

    Returns
    -------
    graph_id : int
        unique graph ID

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> my_graph_id = client.create_graph()
    >>> # Load a CSV to the new graph
    >>> client.load_csv_as_edge_data(
    ...     "edges.csv", ["int32", "int32", "float32"],
    ...     vertex_col_names=["src", "dst"], graph_id=my_graph_id)
    """
    new_graph_id = self.__client.create_graph()
    return new_graph_id
@__server_connection
def delete_graph(self, graph_id):
    """
    Delete the server-side graph referenced by graph_id, reclaiming its
    memory.

    Parameters
    ----------
    graph_id : int
        The graph ID to delete. If the ID passed is not valid on the
        server, CugraphServiceError is raised.

    Returns
    -------
    None

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> my_graph_id = client.create_graph()
    >>> client.load_csv_as_edge_data(
    ...     "edges.csv", ["int32", "int32", "float32"],
    ...     vertex_col_names=["src", "dst"], graph_id=my_graph_id)
    >>> client.delete_graph(my_graph_id)
    """
    result = self.__client.delete_graph(graph_id)
    return result
def graph(self):
    """
    Construct and return a new RemoteGraph object wrapping a remote
    PropertyGraph created on the server.
    """
    new_graph_id = self.create_graph()
    return RemoteGraph(self, new_graph_id)
@__server_connection
def get_graph_ids(self):
    """
    Return a list of all graph IDs the server is currently maintaining.

    Returns
    -------
    graph_id_list : list of unique int graph IDs

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> # This server already has graphs loaded from other sessions
    >>> client.get_graph_ids()
    [0, 26]
    """
    ids = self.__client.get_graph_ids()
    return ids
@__server_connection
def get_graph_info(self, keys=None, graph_id=defaults.graph_id):
    """
    Returns a dictionary containing meta-data about the graph referenced
    by graph_id (or the default graph if not specified).

    Parameters
    ----------
    keys : string or list of strings, default is None
        The meta-data name (or names) to return values for. If a single
        string is given, only the corresponding value is returned. If
        None, all meta-data entries are returned.
    graph_id : int, default is defaults.graph_id
        The graph ID of the graph to query. If not provided the default
        graph ID is used.

    Returns
    -------
    graph_info : dict, or a single value when exactly one key was given

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> client.load_csv_as_vertex_data(
    ...     "/server/path/to/vertex_data.csv",
    ...     dtypes=["int32", "string", "int32"],
    ...     vertex_col_name="vertex_id",
    ...     header="infer")
    >>> client.get_graph_info()
    {'num_edges': 3, 'num_vertices': 4}
    """
    # Ensure keys is a list of strings when passing to RPC API
    if keys is None:
        keys = []
    elif isinstance(keys, str):
        keys = [keys]
    elif isinstance(keys, list):
        # not all(...) short-circuits on the first bad element instead of
        # materializing a full list of isinstance results.
        if not all(isinstance(k, str) for k in keys):
            raise TypeError(f"keys must be a list of strings, got {keys}")
    else:
        raise TypeError(
            "keys must be a string or list of strings, got " f"{type(keys)}"
        )
    graph_info = self.__client.get_graph_info(keys, graph_id)
    # special case: if only one key was specified, return only the single
    # value
    if len(keys) == 1:
        return ValueWrapper(graph_info[keys[0]]).get_py_obj()
    # graph_info is a dictionary of Value objects ("union" types returned
    # from the server), so convert them to simple py types.
    return dict((k, ValueWrapper(graph_info[k]).get_py_obj()) for k in graph_info)
@__server_connection
def load_csv_as_vertex_data(
    self,
    csv_file_name,
    dtypes,
    vertex_col_name,
    delimiter=" ",
    header=None,
    type_name="",
    property_columns=None,
    graph_id=defaults.graph_id,
    names=None,
):
    """
    Reads csv_file_name and applies it as vertex data to the graph
    identified as graph_id (or the default graph if not specified).

    Parameters
    ----------
    csv_file_name : string
        Path to CSV file on the server
    dtypes : list of strings
        Types for the columns in the CSV file
    vertex_col_name : string
        Name of the column to use as the vertex ID
    delimiter : string, default is " "
        Character that serves as the delimiter between columns in the CSV
    header : int, default is None
        Row number to use as the column names. Default behavior is to
        assume column names are explicitly provided (header=None).
        header="infer" if the column names are to be inferred. If no names
        are passed, header=0. See also cudf.read_csv
    type_name : string, default is ""
        The vertex property "type" the CSV data is describing. For
        instance, CSV data describing properties for "users" might pass
        type_name as "user". A vertex property type is optional.
    property_columns : list of strings, default is None
        The column names in the CSV to add as vertex properties. If None,
        all columns will be added as properties.
    graph_id : int, default is defaults.graph_id
        The graph ID to apply the properties in the CSV to. If not provided
        the default graph ID is used.
    names : list of strings, default is None
        The names to be used to reference the CSV columns, in lieu of a
        header.

    Returns
    -------
    None

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> client.load_csv_as_vertex_data(
    ...     "/server/path/to/vertex_data.csv",
    ...     dtypes=["int32", "string", "int32"],
    ...     vertex_col_name="vertex_id",
    ...     header="infer")
    >>>
    """
    # Map all int arg types that also have string options to ints:
    # "infer" and None are encoded as the int sentinels -1 and -2 for
    # the RPC API.
    # FIXME: check for invalid header arg values
    if header == "infer":
        header = -1
    elif header is None:
        header = -2
    # NOTE: the RPC call takes these positionally and in a different
    # order than this method's signature — keep the order intact.
    return self.__client.load_csv_as_vertex_data(
        csv_file_name,
        delimiter,
        dtypes,
        header,
        vertex_col_name,
        type_name,
        property_columns or [],
        graph_id,
        names or [],
    )
@__server_connection
def load_csv_as_edge_data(
    self,
    csv_file_name,
    dtypes,
    vertex_col_names,
    delimiter=" ",
    header=None,
    type_name="",
    property_columns=None,
    edge_id_col_name=None,
    graph_id=defaults.graph_id,
    names=None,
):
    """
    Reads csv_file_name and applies it as edge data to the graph identified
    as graph_id (or the default graph if not specified).

    Parameters
    ----------
    csv_file_name : string
        Path to CSV file on the server
    dtypes : list of strings
        Types for the columns in the CSV file
    vertex_col_names : tuple of strings
        Names of the columns to use as the source and destination vertex
        IDs defining the edges
    delimiter : string, default is " "
        Character that serves as the delimiter between columns in the CSV
    header : int, default is None
        Row number to use as the column names. Default behavior is to
        assume column names are explicitly provided (header=None).
        header="infer" if the column names are to be inferred. If no names
        are passed, header=0. See also cudf.read_csv
    type_name : string, default is ""
        The edge property "type" the CSV data is describing. For instance,
        CSV data describing properties for "transactions" might pass
        type_name as "transaction". An edge property type is optional.
    property_columns : list of strings, default is None
        The column names in the CSV to add as edge properties. If None, all
        columns will be added as properties.
    edge_id_col_name : string, optional
        The column name that contains the values to be used as edge IDs.
        If unspecified, edge IDs will be automatically assigned.
        Currently, all edge data must be added with the same method: either
        with automatically generated IDs, or from user-provided edge IDs.
    graph_id : int, default is defaults.graph_id
        The graph ID to apply the properties in the CSV to. If not provided
        the default graph ID is used.
    names : list of strings, default is None
        The names to be used to reference the CSV columns, in lieu of a
        header.

    Returns
    -------
    None

    Examples
    --------
    >>> from cugraph_service_client import CugraphServiceClient
    >>> client = CugraphServiceClient()
    >>> client.load_csv_as_edge_data(
    ...     "/server/path/to/edge_data.csv",
    ...     dtypes=["int32", "int32", "string", "int32"],
    ...     vertex_col_names=("src", "dst"),
    ...     header="infer")
    >>>
    """
    # Map all int arg types that also have string options to ints:
    # "infer" and None are encoded as the int sentinels -1 and -2 for
    # the RPC API.
    # FIXME: check for invalid header arg values
    if header == "infer":
        header = -1
    elif header is None:
        header = -2
    # NOTE: the RPC call takes these positionally and in a different
    # order than this method's signature — keep the order intact.
    return self.__client.load_csv_as_edge_data(
        csv_file_name,
        delimiter,
        dtypes,
        header,
        vertex_col_names,
        type_name,
        property_columns or [],
        graph_id,
        names or [],
        edge_id_col_name or "",
    )
@__server_connection
def get_edge_IDs_for_vertices(
    self, src_vert_IDs, dst_vert_IDs, graph_id=defaults.graph_id
):
    """
    Return edge IDs from the graph referenced by graph_id for the
    (source, destination) vertex pairs given in src_vert_IDs and
    dst_vert_IDs (presumably paired positionally — confirm against the
    server implementation).
    """
    # FIXME: finish docstring above
    # FIXME: add type checking
    return self.__client.get_edge_IDs_for_vertices(
        src_vert_IDs, dst_vert_IDs, graph_id
    )
@__server_connection
def renumber_vertices_by_type(
    self, prev_id_column=None, graph_id=defaults.graph_id
):
    """
    Renumber the vertices in the graph referenced by graph_id to be
    contiguous by vertex type. Returns the start and end vertex id
    ranges for each type.

    Parameters
    ----------
    prev_id_column : string, default is None
        Column name forwarded to the server (presumably where previous
        vertex IDs are preserved — confirm server docs); None is sent
        as an empty string.
    graph_id : int, default is defaults.graph_id
    """
    # The RPC layer expects a string, not None.
    column = "" if prev_id_column is None else prev_id_column
    return self.__client.renumber_vertices_by_type(column, graph_id)
@__server_connection
def renumber_edges_by_type(self, prev_id_column=None, graph_id=defaults.graph_id):
    """
    Renumber the edges in the graph referenced by graph_id to be
    contiguous by edge type. Returns the start and end edge id ranges
    for each type.

    Parameters
    ----------
    prev_id_column : string, default is None
        Column name forwarded to the server (presumably where previous
        edge IDs are preserved — confirm server docs); None is sent as
        an empty string.
    graph_id : int, default is defaults.graph_id
    """
    # The RPC layer expects a string, not None.
    column = "" if prev_id_column is None else prev_id_column
    return self.__client.renumber_edges_by_type(column, graph_id)
@__server_connection
def extract_subgraph(
    self,
    create_using=None,
    selection=None,
    edge_weight_property="",
    default_edge_weight=1.0,
    check_multi_edges=True,
    renumber_graph=True,
    add_edge_data=True,
    graph_id=defaults.graph_id,
):
    """
    Return a graph ID for a subgraph of the graph referenced by graph_id
    containing vertices and edges that match a selection.

    Parameters
    ----------
    create_using : string, default is None
        String describing the type of Graph object to create from the
        selected subgraph of vertices and edges. The default (None) results
        in a directed cugraph.MultiGraph object.
    selection : int, default is None
        A PropertySelection ID returned from one or more calls to
        select_vertices() and/or select_edges(), used for creating a Graph
        with only the selected properties. If not specified the resulting
        Graph will have all properties. Note, this could result in a Graph
        with multiple edges, which may not be supported based on the value
        of create_using.
    edge_weight_property : string, default is ""
        The name of the property whose values will be used as weights on
        the returned Graph. If not specified, the returned Graph will be
        unweighted.
    default_edge_weight : float, default is 1.0
        The value to use when an edge property is specified but not present
        on an edge.
    check_multi_edges : bool (default is True)
        When True and create_using argument is given and not a MultiGraph,
        this will perform an expensive check to verify that the edges in
        the edge dataframe do not form a multigraph with duplicate edges.
    renumber_graph : bool (default is True)
        Forwarded to the server; presumably controls whether the extracted
        graph is renumbered — confirm against server docs.
    add_edge_data : bool (default is True)
        Forwarded to the server; presumably controls whether edge data is
        attached to the extracted graph — confirm against server docs.
    graph_id : int, default is defaults.graph_id
        The graph ID to extract the subgraph from. If the ID passed is not
        valid on the server, CugraphServiceError is raised.

    Returns
    -------
    A graph ID for a new Graph instance of the same type as create_using
    containing only the vertices and edges resulting from applying the
    selection to the set of vertex and edge property data.

    Examples
    --------
    >>>
    """
    # FIXME: finish docstring above
    # FIXME: convert defaults to type needed by the Thrift API. These will
    # be changing to different types.
    create_using = create_using or ""
    selection = selection or ""
    # NOTE: args are positional in the RPC call — keep this order intact.
    return self.__client.extract_subgraph(
        create_using,
        selection,
        edge_weight_property,
        default_edge_weight,
        check_multi_edges,
        renumber_graph,
        add_edge_data,
        graph_id,
    )
@__server_connection
def get_graph_vertex_data(
    self,
    id_or_ids=-1,
    null_replacement_value=0,
    property_keys=None,
    types=None,
    graph_id=defaults.graph_id,
):
    """
    Return vertex data from the graph referenced by graph_id.

    Parameters
    ----------
    id_or_ids : int or list of ints (default -1)
        Vertex ID(s) to return data for. The -1 default appears to be a
        sentinel for "all vertices" — confirm against the server.
    null_replacement_value : number or string (default 0)
        Value substituted for nulls in the returned data.
    property_keys : list of strings (default [])
        The keys (names) of properties to retrieve. If omitted, returns
        the whole dataframe.
    types : list of strings (default [])
        The vertex types to include in the query. If omitted, returns
        properties for all types.
    graph_id : int, default is defaults.graph_id
        The graph ID of the graph to query. If the ID passed is not
        valid on the server, CugraphServiceError is raised.

    Returns
    -------
    The object unpickled from the bytes the server returns (an ndarray,
    per the variable naming — confirm against the server).

    Examples
    --------
    >>>
    """
    # FIXME: finish docstring above
    # Wrap the ID(s) and replacement value in the RPC "union" types.
    vertex_edge_id_obj = self.__get_vertex_edge_id_obj(id_or_ids)
    null_replacement_value_obj = ValueWrapper(
        null_replacement_value, val_name="null_replacement_value"
    ).union
    ndarray_bytes = self.__client.get_graph_vertex_data(
        vertex_edge_id_obj,
        null_replacement_value_obj,
        property_keys or [],
        types or [],
        graph_id,
    )
    # The server sends the result as pickled bytes.
    return pickle.loads(ndarray_bytes)
@__server_connection
def get_graph_edge_data(
    self,
    id_or_ids=-1,
    null_replacement_value=0,
    property_keys=None,
    types=None,
    graph_id=defaults.graph_id,
):
    """
    Return edge data from the graph referenced by graph_id.

    Parameters
    ----------
    id_or_ids : int or list of ints (default -1)
        Edge ID(s) to return data for. The -1 default appears to be a
        sentinel for "all edges" — confirm against the server.
    null_replacement_value : number or string (default 0)
        Value substituted for nulls in the returned data.
    property_keys : list of strings (default [])
        The keys (names) of properties to retrieve. If omitted, returns
        the whole dataframe.
    types : list of strings (default [])
        The types of edges to include in the query. If omitted, returns
        data for all edge types.
    graph_id : int, default is defaults.graph_id
        The graph ID of the graph to query. If the ID passed is not
        valid on the server, CugraphServiceError is raised.

    Returns
    -------
    The object unpickled from the bytes the server returns (an ndarray,
    per the variable naming — confirm against the server).

    Examples
    --------
    >>>
    """
    # FIXME: finish docstring above
    # Wrap the ID(s) and replacement value in the RPC "union" types.
    vertex_edge_id_obj = self.__get_vertex_edge_id_obj(id_or_ids)
    null_replacement_value_obj = ValueWrapper(
        null_replacement_value, val_name="null_replacement_value"
    ).union
    ndarray_bytes = self.__client.get_graph_edge_data(
        vertex_edge_id_obj,
        null_replacement_value_obj,
        property_keys or [],
        types or [],
        graph_id,
    )
    # The server sends the result as pickled bytes.
    return pickle.loads(ndarray_bytes)
@__server_connection
def is_vertex_property(self, property_key, graph_id=defaults.graph_id):
    """
    Return True if property_key names a valid vertex property in the
    given graph, False otherwise.

    Parameters
    ----------
    property_key : string
        The key (name) of the vertex property to check
    graph_id : int
        The id of the graph of interest
    """
    result = self.__client.is_vertex_property(property_key, graph_id)
    return result
@__server_connection
def is_edge_property(self, property_key, graph_id=defaults.graph_id):
    """
    Returns True if the given property key is for a valid edge property
    in the given graph, False otherwise.

    Parameters
    ----------
    property_key : string
        The key (name) of the edge property to check
    graph_id : int
        The id of the graph of interest
    """
    return self.__client.is_edge_property(property_key, graph_id)
@__server_connection
def get_graph_vertex_property_names(self, graph_id=defaults.graph_id):
    """
    Return the list of vertex property names for the graph with the
    given graph id.

    Parameters
    ----------
    graph_id : int
        The id of the graph of interest
    """
    names = self.__client.get_graph_vertex_property_names(graph_id)
    return names
@__server_connection
def get_graph_edge_property_names(self, graph_id=defaults.graph_id):
    """
    Return the list of edge property names for the graph with the given
    graph id.

    Parameters
    ----------
    graph_id : int
        The id of the graph of interest
    """
    names = self.__client.get_graph_edge_property_names(graph_id)
    return names
@__server_connection
def get_graph_vertex_types(self, graph_id=defaults.graph_id):
    """
    Return the list of vertex type names for the graph with the given
    graph id.

    Parameters
    ----------
    graph_id : int
        The id of the graph of interest
    """
    type_names = self.__client.get_graph_vertex_types(graph_id)
    return type_names
@__server_connection
def get_graph_edge_types(self, graph_id=defaults.graph_id):
    """
    Return the list of edge type names for the graph with the given
    graph id.

    Parameters
    ----------
    graph_id : int
        The id of the graph of interest
    """
    type_names = self.__client.get_graph_edge_types(graph_id)
    return type_names
@__server_connection
def get_num_vertices(
    self, vertex_type=None, include_edge_data=True, graph_id=defaults.graph_id
):
    """
    Return the number of vertices in the graph with the given graph id.

    Parameters
    ----------
    vertex_type : string
        The vertex type to count. If not defined, all types are counted.
    include_edge_data : bool
        Whether to include vertices added only as part of the edgelist.
    graph_id : int
        The id of the graph of interest.
    """
    # The RPC layer expects a string; empty string means "all types".
    type_arg = "" if vertex_type is None else vertex_type
    return self.__client.get_num_vertices(type_arg, include_edge_data, graph_id)
@__server_connection
def get_num_edges(self, edge_type=None, graph_id=defaults.graph_id):
    """
    Return the number of edges in the graph with the given graph id.

    Parameters
    ----------
    edge_type : string
        The edge type to count. If not defined, all types are counted.
    graph_id : int
        The id of the graph of interest.
    """
    # The RPC layer expects a string; empty string means "all types".
    type_arg = "" if edge_type is None else edge_type
    return self.__client.get_num_edges(type_arg, graph_id)
###########################################################################
# Algos
@__server_connection
def batched_ego_graphs(self, seeds, radius=1, graph_id=defaults.graph_id):
    """
    Compute ego graph data on the server for each seed in seeds on the
    graph referenced by graph_id (presumably the subgraph within
    `radius` hops of each seed — confirm against the server
    implementation).

    Parameters
    ----------
    seeds : int or list of ints
    radius : int (default 1)
    graph_id : int, default is defaults.graph_id

    Returns
    -------
    (src_verts, dst_verts, edge_weights, seeds_offsets) tuple, taken
    directly from the server result.
    """
    # FIXME: finish docstring above
    # The RPC API requires a list, not a bare scalar.
    if not isinstance(seeds, list):
        seeds = [seeds]
    batched_ego_graphs_result = self.__client.batched_ego_graphs(
        seeds, radius, graph_id
    )
    return (
        batched_ego_graphs_result.src_verts,
        batched_ego_graphs_result.dst_verts,
        batched_ego_graphs_result.edge_weights,
        batched_ego_graphs_result.seeds_offsets,
    )
@__server_connection
def node2vec(self, start_vertices, max_depth, graph_id=defaults.graph_id):
    """
    Computes random walks for each node in 'start_vertices', under the
    node2vec sampling framework, on the graph referenced by graph_id.

    Parameters
    ----------
    start_vertices : int or list
        A single node or a list of nodes from which to run the random
        walks. Only supports int32 currently.
    max_depth : int
        The maximum depth of the random walks

    Returns
    -------
    (vertex_paths, edge_weights, path_sizes) tuple of host lists taken
    from the server result.

    Examples
    --------
    >>>
    """
    # FIXME: finish docstring above
    # The Thrift API requires an actual list (not just any iterable);
    # the return value is a tuple of python lists on the host.
    if not isinstance(start_vertices, list):
        start_vertices = [start_vertices]
    # FIXME: ensure list is a list of int32, since Thrift interface
    # specifies that?
    result = self.__client.node2vec(start_vertices, max_depth, graph_id)
    return (result.vertex_paths, result.edge_weights, result.path_sizes)
@__server_connection
def uniform_neighbor_sample(
    self,
    start_list,
    fanout_vals,
    with_replacement=True,
    *,
    graph_id=defaults.graph_id,
    result_device=None,
):
    """
    Samples the graph and returns a UniformNeighborSampleResult instance.

    Parameters
    ----------
    start_list : list[int]
    fanout_vals : list[int]
    with_replacement : bool
    graph_id : int, default is defaults.graph_id
    result_device : int, default is None
        If given, the integer ID of a GPU device to have the server
        transfer the result arrays to.

    Returns
    -------
    result : UniformNeighborSampleResult
        Instance containing three CuPy device arrays.

        result.sources: CuPy array
            Contains the source vertices from the sampling result
        result.destinations: CuPy array
            Contains the destination vertices from the sampling result
        result.indices: CuPy array
            Contains the indices from the sampling result for path
            reconstruction
    """
    if result_device is not None:
        # run_async handles being called both with and without an
        # already-running event loop (it spawns a helper thread in the
        # running-loop case).
        result_obj = run_async(
            self.__uniform_neighbor_sample_to_device,
            start_list,
            fanout_vals,
            with_replacement,
            graph_id,
            result_device,
        )
    else:
        # Host-side path: plain synchronous RPC call.
        result_obj = self.__client.uniform_neighbor_sample(
            start_list,
            fanout_vals,
            with_replacement,
            graph_id,
            client_host=None,
            client_result_port=None,
        )
    return result_obj
@__server_connection
def pagerank(self, graph_id=defaults.graph_id):
    """
    Compute pagerank on the graph referenced by graph_id.

    Not implemented yet.
    """
    raise NotImplementedError
###########################################################################
# Test/Debug
@__server_connection
def _create_test_array(self, nbytes):
    """
    Create an array of bytes (int8 values set to 1) on the server and
    return an ID that later test calls can use to reference it.

    The array must be removed server-side with _delete_test_array().
    """
    array_id = self.__client.create_test_array(nbytes)
    return array_id
@__server_connection
def _delete_test_array(self, test_array_id):
    """
    Delete the server-side test array identified by test_array_id.
    """
    # No meaningful return value; the RPC result is intentionally dropped.
    self.__client.delete_test_array(test_array_id)
@__server_connection
def _receive_test_array(self, test_array_id, result_device=None):
    """
    Returns the array of bytes (int8 values set to 1) from the server,
    either to result_device or on the client host. The array returned must
    have been created by a prior call to _create_test_array() which
    returned test_array_id.

    This can be used to verify transfer speeds from server to client are
    performing as expected.

    Parameters
    ----------
    test_array_id : int
        ID returned by _create_test_array().
    result_device : int, default is None
        If given, the GPU device to receive the array on.
    """
    if result_device is not None:
        # Use run_async rather than asyncio.run directly so this also
        # works when called from a thread that already has a running
        # event loop, consistent with uniform_neighbor_sample() and
        # call_extension().
        return run_async(
            self.__receive_test_array_to_device, test_array_id, result_device
        )
    else:
        return self.__client.receive_test_array(test_array_id)
@__server_connection
def _get_graph_type(self, graph_id=defaults.graph_id):
    """
    Test/debug API returning a string repr of the graph_id instance.
    """
    type_str = self.__client.get_graph_type(graph_id)
    return type_str
###########################################################################
# Private
    async def __receive_test_array_to_device(self, test_array_id, result_device):
        """
        Receive the server-side test array identified by test_array_id
        directly onto the GPU device given by result_device.

        Starts a UCX listener on self.results_port, asks the server (via a
        one-way client call) to push the array to that listener, then polls
        until the listener has been closed by the receiver callback.
        """
        # Create an object to set results on in the "receiver" callback below.
        result_obj = type("Result", (), {})()
        # Allocator that places received buffers on result_device.
        allocator = DeviceArrayAllocator(result_device)

        async def receiver(endpoint):
            # Runs when the server connects to the listener below. NOTE:
            # `listener` is late-bound — it is assigned after this def but
            # before the callback can ever fire.
            with cp.cuda.Device(result_device):
                result_obj.array = await endpoint.recv_obj(allocator=allocator)
                # Reinterpret the raw received bytes as int8.
                result_obj.array = result_obj.array.view("int8")
            await endpoint.close()
            # Closing the listener is what terminates the polling loop below.
            listener.close()

        listener = ucp.create_listener(receiver, self.results_port)

        # This sends a one-way request to the server and returns
        # immediately. The server will create and send the array back to the
        # listener started above.
        self.__client.receive_test_array_to_device(
            test_array_id, self.host, self.results_port
        )
        # Poll until the receiver callback has closed the listener.
        while not listener.closed():
            await asyncio.sleep(0.05)
        return result_obj.array
    async def __uniform_neighbor_sample_to_device(
        self, start_list, fanout_vals, with_replacement, graph_id, result_device
    ):
        """
        Run uniform_neighbor_sample() with the args provided, but have the
        result send directly to the device specified by result_device.

        The blocking client call is run in a worker thread while this
        coroutine polls a UCX listener that receives the three result arrays
        (sources, destinations, indices) onto result_device. Exceptions
        raised in the worker thread are captured via threading.excepthook and
        re-raised here.
        """
        # FIXME: check for valid device
        result_obj = UniformNeighborSampleResult()
        # Allocator that places received buffers on result_device.
        allocator = DeviceArrayAllocator(result_device)

        async def receiver(endpoint):
            # Runs when the server connects. `listener` is late-bound — it is
            # assigned after this def but before the callback can fire.
            # The server is expected to send three arrays in this order;
            # the .view() calls reinterpret the raw bytes with the dtypes
            # the server uses (int32 vertex IDs, float64 indices).
            with cp.cuda.Device(result_device):
                result_obj.sources = await endpoint.recv_obj(allocator=allocator)
                result_obj.sources = result_obj.sources.view("int32")
                result_obj.destinations = await endpoint.recv_obj(allocator=allocator)
                result_obj.destinations = result_obj.destinations.view("int32")
                result_obj.indices = await endpoint.recv_obj(allocator=allocator)
                result_obj.indices = result_obj.indices.view("float64")
            await endpoint.close()
            # Closing the listener terminates the polling loop below.
            listener.close()

        listener = ucp.create_listener(receiver, self.results_port)

        # Use an excepthook to store an exception on the thread object if one is
        # raised in the thread.
        def excepthook(exc):
            if exc.thread is not None:
                exc.thread.exception = exc.exc_type(exc.exc_value)

        orig_excepthook = threading.excepthook
        threading.excepthook = excepthook
        # Run the blocking client call in a thread so this coroutine can keep
        # polling the listener concurrently.
        thread = threading.Thread(
            target=self.__client.uniform_neighbor_sample,
            args=(
                start_list,
                fanout_vals,
                with_replacement,
                graph_id,
                self.host,
                self.results_port,
            ),
        )
        thread.start()
        # Poll the listener and the state of the thread. Close the listener if
        # the thread died and raise the stored exception.
        while not listener.closed():
            await asyncio.sleep(0.05)
            if not thread.is_alive():
                # Thread finished (possibly with an error) — force loop exit.
                listener.close()
        threading.excepthook = orig_excepthook
        if hasattr(thread, "exception"):
            # Re-raise the exception captured by the excepthook above.
            raise thread.exception
        thread.join()
        return result_obj
    async def __call_extension_to_device(
        self, func_name, func_args_repr, func_kwargs_repr, result_device
    ):
        """
        Run the server-side extension func_name with the args/kwargs and have the
        result sent directly to the device specified by result_device.

        Same thread-plus-listener pattern as the uniform_neighbor_sample
        device path: the blocking client call runs in a worker thread while
        this coroutine polls a UCX listener receiving the result arrays onto
        result_device.
        """
        # FIXME: there's probably a better way to do this, eg. create a class containing
        # both allocator and receiver that maintains results, devices, etc. that's
        # callable from the listener
        result = []
        # FIXME: check for valid device
        allocator = DeviceArrayAllocator(result_device)

        async def receiver(endpoint):
            # Format of data sent is assumed to be:
            # 1) a single array of length n describing the dtypes for the n arrays that
            # follow
            # 2) n arrays
            # NOTE: `listener` is late-bound — assigned after this def but
            # before the callback can fire.
            with cp.cuda.Device(result_device):
                # First get the array describing the data
                # FIXME: meta_data doesn't need to be a cupy array
                dtype_meta_data = await endpoint.recv_obj(allocator=allocator)
                for dtype_enum in [int(i) for i in dtype_meta_data]:
                    # FIXME: safe to assume dtype_enum will always be valid?
                    dtype = extension_return_dtype_map[dtype_enum]
                    a = await endpoint.recv_obj(allocator=allocator)
                    result.append(a.view(dtype))
            await endpoint.close()
            # Closing the listener terminates the polling loop below.
            listener.close()

        listener = ucp.create_listener(receiver, self.results_port)

        # Use an excepthook to store an exception on the thread object if one is
        # raised in the thread.
        def excepthook(exc):
            if exc.thread is not None:
                exc.thread.exception = exc.exc_type(exc.exc_value)

        orig_excepthook = threading.excepthook
        threading.excepthook = excepthook
        # Run the blocking client call in a thread so this coroutine can keep
        # polling the listener concurrently.
        thread = threading.Thread(
            target=self.__client.call_extension,
            args=(
                func_name,
                func_args_repr,
                func_kwargs_repr,
                self.host,
                self.results_port,
            ),
        )
        thread.start()
        # Poll the listener and the state of the thread. Close the listener if
        # the thread died and raise the stored exception.
        while not listener.closed():
            await asyncio.sleep(0.05)
            if not thread.is_alive():
                # Thread finished (possibly with an error) — force loop exit.
                listener.close()
        threading.excepthook = orig_excepthook
        if hasattr(thread, "exception"):
            # Re-raise the exception captured by the excepthook above.
            raise thread.exception
        thread.join()
        # special case, assume a list of len 1 should not be a list
        if len(result) == 1:
            result = result[0]
        return result
@staticmethod
def __get_vertex_edge_id_obj(id_or_ids):
# Force np.ndarray
if not isinstance(id_or_ids, (int, Sequence, np.ndarray)):
if cupy_installed and isinstance(id_or_ids, cp.ndarray):
id_or_ids = id_or_ids.get()
elif cudf_installed and isinstance(id_or_ids, cudf.Series):
id_or_ids = id_or_ids.values_host
elif pandas_installed and isinstance(id_or_ids, pandas.Series):
id_or_ids = id_or_ids.to_numpy()
else:
raise ValueError(
f"No available module for processing {type(id_or_ids)}"
)
if isinstance(id_or_ids, Sequence):
vert_edge_id_obj = GraphVertexEdgeID(int64_ids=id_or_ids)
elif isinstance(id_or_ids, np.ndarray):
if id_or_ids.dtype == "int32":
vert_edge_id_obj = GraphVertexEdgeID(int32_ids=id_or_ids)
elif id_or_ids.dtype == "int64":
vert_edge_id_obj = GraphVertexEdgeID(int64_ids=id_or_ids)
else:
vert_edge_id_obj = GraphVertexEdgeID(int64_id=id_or_ids)
return vert_edge_id_obj
| 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.