Dataset schema (one row per source file; numeric pairs are the min and max observed values):

  column            type            range / values
  repo              stringlengths   1 - 152
  file              stringlengths   14 - 221
  code              stringlengths   501 - 25k
  file_length       int64           501 - 25k
  avg_line_length   float64         20 - 99.5
  max_line_length   int64           21 - 134
  extension_type    stringclasses   2 values
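For concreteness, one row of this table can be modeled as a plain C struct; the field names mirror the columns above, and the struct itself is illustrative rather than part of the dataset:

#include <stdint.h>

/* Illustrative only: one row of the dataset described above. */
typedef struct {
  const char* repo;           /* repository-qualified path (1-152 chars) */
  const char* file;           /* file path within the repo (14-221 chars) */
  const char* code;           /* full source text (501-25k chars) */
  int64_t file_length;        /* length of the code field, in characters */
  double avg_line_length;     /* mean line length (20-99.5) */
  int64_t max_line_length;    /* longest line (21-134 characters) */
  const char* extension_type; /* one of 2 classes; "c" and "h" in the rows below */
} dataset_row_t;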
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_leiden_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_leiden_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

int generic_leiden_test(const cugraph_resource_handle_t* p_handle,
                        vertex_t* h_src,
                        vertex_t* h_dst,
                        weight_t* h_wgt,
                        vertex_t* h_result,
                        size_t num_vertices,
                        size_t num_edges,
                        size_t max_level,
                        double resolution,
                        double theta,
                        bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph                           = NULL;
  cugraph_hierarchical_clustering_result_t* p_result = NULL;

  int rank = cugraph_resource_handle_get_rank(p_handle);
  cugraph_rng_state_t* rng_state;
  ret_code = cugraph_rng_state_create(p_handle, rank, &rng_state, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "rng_state create failed.");
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  ret_code = create_mg_test_graph(
    p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &p_graph, &ret_error);

  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed.");
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  ret_code = cugraph_leiden(
    p_handle, rng_state, p_graph, max_level, resolution, theta, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_leiden failed.");

  if (test_ret_value == 0) {
    cugraph_type_erased_device_array_view_t* vertices;
    cugraph_type_erased_device_array_view_t* clusters;

    vertices          = cugraph_hierarchical_clustering_result_get_vertices(p_result);
    clusters          = cugraph_hierarchical_clustering_result_get_clusters(p_result);
    double modularity = cugraph_hierarchical_clustering_result_get_modularity(p_result);

    vertex_t h_vertices[num_vertices];
    edge_t h_clusters[num_vertices];

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      p_handle, (byte_t*)h_vertices, vertices, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      p_handle, (byte_t*)h_clusters, clusters, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

    vertex_t max_component_id = -1;
    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      if (h_clusters[i] > max_component_id) max_component_id = h_clusters[i];
    }

    vertex_t component_mapping[max_component_id + 1];
    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      component_mapping[h_clusters[i]] = h_result[h_vertices[i]];
    }

#if 0
    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      TEST_ASSERT(test_ret_value,
                  h_result[h_vertices[i]] == component_mapping[h_clusters[i]],
                  "cluster results don't match");
    }
#endif

    cugraph_hierarchical_clustering_result_free(p_result);
  }

  cugraph_mg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_leiden(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;
  size_t max_level    = 10;
  weight_t resolution = 1.0;
  weight_t theta      = 1.0;

  vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[] = {
    0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_result[] = {1, 0, 1, 0, 0, 0};

  // Leiden wants store_transposed = FALSE
  return generic_leiden_test(handle,
                             h_src,
                             h_dst,
                             h_wgt,
                             h_result,
                             num_vertices,
                             num_edges,
                             max_level,
                             resolution,
                             theta,
                             FALSE);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_leiden, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 5,732
avg_line_length: 35.515924
max_line_length: 100
extension_type: c
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_louvain_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_louvain_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

int generic_louvain_test(const cugraph_resource_handle_t* p_handle,
                         vertex_t* h_src,
                         vertex_t* h_dst,
                         weight_t* h_wgt,
                         vertex_t* h_result,
                         size_t num_vertices,
                         size_t num_edges,
                         size_t max_level,
                         double resolution,
                         bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph                           = NULL;
  cugraph_hierarchical_clustering_result_t* p_result = NULL;

  ret_code = create_mg_test_graph(
    p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &p_graph, &ret_error);

  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed.");
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  ret_code =
    cugraph_louvain(p_handle, p_graph, max_level, resolution, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_louvain failed.");

  if (test_ret_value == 0) {
    cugraph_type_erased_device_array_view_t* vertices;
    cugraph_type_erased_device_array_view_t* clusters;

    vertices = cugraph_hierarchical_clustering_result_get_vertices(p_result);
    clusters = cugraph_hierarchical_clustering_result_get_clusters(p_result);

    vertex_t h_vertices[num_vertices];
    edge_t h_clusters[num_vertices];

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      p_handle, (byte_t*)h_vertices, vertices, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      p_handle, (byte_t*)h_clusters, clusters, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

    vertex_t max_component_id = -1;
    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      if (h_clusters[i] > max_component_id) max_component_id = h_clusters[i];
    }

    vertex_t component_mapping[max_component_id + 1];
    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      component_mapping[h_clusters[i]] = h_result[h_vertices[i]];
    }

    for (vertex_t i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
      TEST_ASSERT(test_ret_value,
                  h_result[h_vertices[i]] == component_mapping[h_clusters[i]],
                  "cluster results don't match");
    }

    cugraph_hierarchical_clustering_result_free(p_result);
  }

  cugraph_mg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_louvain(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;
  size_t max_level    = 10;
  weight_t resolution = 1.0;

  vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[] = {
    0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_result[] = {1, 0, 1, 0, 0, 0};

  // Louvain wants store_transposed = FALSE
  return generic_louvain_test(
    handle, h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, max_level, resolution, FALSE);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_louvain, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 4,905
avg_line_length: 34.810219
max_line_length: 100
extension_type: c
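The Louvain and Leiden tests above compare clusterings only up to a relabeling of cluster ids: the first locally observed member of each cluster fixes the expected label, and every later member must agree. A minimal standalone sketch of that check (the function name and malloc-based mapping table are illustrative, not from the test files):

#include <stdint.h>
#include <stdlib.h>

/* Returns the number of vertices whose cluster id disagrees with the
 * expected labeling, treating cluster ids as interchangeable names;
 * returns -1 on allocation failure. */
static int clusters_mismatch_up_to_relabeling(const int32_t* got,
                                              const int32_t* expected,
                                              size_t n,
                                              int32_t max_cluster_id)
{
  int32_t* mapping = malloc((size_t)(max_cluster_id + 1) * sizeof(int32_t));
  if (mapping == NULL) return -1;
  for (int32_t c = 0; c <= max_cluster_id; ++c)
    mapping[c] = -1;

  int mismatches = 0;
  for (size_t i = 0; i < n; ++i) {
    if (mapping[got[i]] == -1) {
      mapping[got[i]] = expected[i]; /* first member pins the label */
    } else if (mapping[got[i]] != expected[i]) {
      ++mismatches; /* a later member disagrees with the pinned label */
    }
  }
  free(mapping);
  return mismatches;
}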
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_random_walks_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_random_walks_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

int generic_uniform_random_walks_test(const cugraph_resource_handle_t* handle,
                                      vertex_t* h_src,
                                      vertex_t* h_dst,
                                      weight_t* h_wgt,
                                      size_t num_vertices,
                                      size_t num_edges,
                                      vertex_t* h_start,
                                      size_t num_starts,
                                      size_t max_depth,
                                      bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error    = NULL;

  cugraph_graph_t* graph               = NULL;
  cugraph_random_walk_result_t* result = NULL;

  cugraph_type_erased_device_array_t* d_start           = NULL;
  cugraph_type_erased_device_array_view_t* d_start_view = NULL;

  ret_code = create_mg_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed.");

  ret_code =
    cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed.");

  d_start_view = cugraph_type_erased_device_array_view(d_start);

  ret_code = cugraph_type_erased_device_array_view_copy_from_host(
    handle, d_start_view, (byte_t*)h_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed.");

  ret_code =
    cugraph_uniform_random_walks(handle, graph, d_start_view, max_depth, &result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "uniform_random_walks failed.");

  cugraph_type_erased_device_array_view_t* verts;
  cugraph_type_erased_device_array_view_t* wgts;

  verts = cugraph_random_walk_result_get_paths(result);
  wgts  = cugraph_random_walk_result_get_weights(result);

  size_t verts_size = cugraph_type_erased_device_array_view_size(verts);
  size_t wgts_size  = cugraph_type_erased_device_array_view_size(wgts);

  vertex_t h_result_verts[verts_size];
  weight_t h_result_wgts[wgts_size];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_verts, verts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_wgts, wgts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  // NOTE: The C++ tester does a more thorough validation.  For our purposes
  //       here we will do a simpler validation, merely checking that all edges
  //       are actually part of the graph
  weight_t M[num_vertices][num_vertices];

  for (int i = 0; i < num_vertices; ++i)
    for (int j = 0; j < num_vertices; ++j)
      M[i][j] = -1;

  for (int i = 0; i < num_edges; ++i)
    M[h_src[i]][h_dst[i]] = h_wgt[i];

  TEST_ASSERT(test_ret_value,
              cugraph_random_walk_result_get_max_path_length(result) == max_depth,
              "path length does not match");

  for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(
      test_ret_value, h_start[i] == h_result_verts[i * (max_depth + 1)], "start of path not found");
    for (size_t j = 0; j < max_depth; ++j) {
      int src_index = i * (max_depth + 1) + j;
      int dst_index = src_index + 1;
      if (h_result_verts[dst_index] < 0) {
        if (h_result_verts[src_index] >= 0) {
          int departing_count = 0;
          for (int k = 0; k < num_vertices; ++k) {
            if (M[h_result_verts[src_index]][k] >= 0) departing_count++;
          }
          TEST_ASSERT(test_ret_value,
                      departing_count == 0,
                      "uniform_random_walks found no edge when an edge exists");
        }
      } else {
        TEST_ASSERT(test_ret_value,
                    M[h_result_verts[src_index]][h_result_verts[dst_index]] ==
                      h_result_wgts[i * max_depth + j],
                    "uniform_random_walks got edge that doesn't exist");
      }
    }
  }

  cugraph_random_walk_result_free(result);
  cugraph_mg_graph_free(graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int generic_biased_random_walks_test(const cugraph_resource_handle_t* handle,
                                     vertex_t* h_src,
                                     vertex_t* h_dst,
                                     weight_t* h_wgt,
                                     size_t num_vertices,
                                     size_t num_edges,
                                     vertex_t* h_start,
                                     size_t num_starts,
                                     size_t max_depth,
                                     bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error    = NULL;

  cugraph_graph_t* graph               = NULL;
  cugraph_random_walk_result_t* result = NULL;

  cugraph_type_erased_device_array_t* d_start           = NULL;
  cugraph_type_erased_device_array_view_t* d_start_view = NULL;

  ret_code = create_mg_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed.");

  ret_code =
    cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed.");

  d_start_view = cugraph_type_erased_device_array_view(d_start);

  ret_code = cugraph_type_erased_device_array_view_copy_from_host(
    handle, d_start_view, (byte_t*)h_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed.");

  ret_code =
    cugraph_biased_random_walks(handle, graph, d_start_view, max_depth, &result, &ret_error);

#if 1
  TEST_ASSERT(
    test_ret_value, ret_code != CUGRAPH_SUCCESS, "biased_random_walks should have failed");
#else
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "biased_random_walks failed.");

  cugraph_type_erased_device_array_view_t* verts;
  cugraph_type_erased_device_array_view_t* wgts;

  verts = cugraph_random_walk_result_get_paths(result);
  wgts  = cugraph_random_walk_result_get_weights(result);

  size_t verts_size = cugraph_type_erased_device_array_view_size(verts);
  size_t wgts_size  = cugraph_type_erased_device_array_view_size(wgts);

  vertex_t h_result_verts[verts_size];
  weight_t h_result_wgts[wgts_size];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_verts, verts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_wgts, wgts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  // NOTE: The C++ tester does a more thorough validation.  For our purposes
  //       here we will do a simpler validation, merely checking that all edges
  //       are actually part of the graph
  weight_t M[num_vertices][num_vertices];

  for (int i = 0; i < num_vertices; ++i)
    for (int j = 0; j < num_vertices; ++j)
      M[i][j] = -1;

  for (int i = 0; i < num_edges; ++i)
    M[h_src[i]][h_dst[i]] = h_wgt[i];

  TEST_ASSERT(test_ret_value,
              cugraph_random_walk_result_get_max_path_length(result) == max_depth,
              "path length does not match");

  for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                M[h_start[i]][h_result_verts[i * (max_depth + 1)]] == h_result_wgts[i * max_depth],
                "biased_random_walks got edge that doesn't exist");
    for (size_t j = 1; j < cugraph_random_walk_result_get_max_path_length(result); ++j)
      TEST_ASSERT(
        test_ret_value,
        M[h_result_verts[i * (max_depth + 1) + j - 1]][h_result_verts[i * (max_depth + 1) + j]] ==
          h_result_wgts[i * max_depth + j - 1],
        "biased_random_walks got edge that doesn't exist");
  }

  cugraph_random_walk_result_free(result);
#endif

  cugraph_mg_graph_free(graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int generic_node2vec_random_walks_test(const cugraph_resource_handle_t* handle,
                                       vertex_t* h_src,
                                       vertex_t* h_dst,
                                       weight_t* h_wgt,
                                       size_t num_vertices,
                                       size_t num_edges,
                                       vertex_t* h_start,
                                       size_t num_starts,
                                       size_t max_depth,
                                       float p,
                                       float q,
                                       bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error    = NULL;

  cugraph_graph_t* graph               = NULL;
  cugraph_random_walk_result_t* result = NULL;

  cugraph_type_erased_device_array_t* d_start           = NULL;
  cugraph_type_erased_device_array_view_t* d_start_view = NULL;

  ret_code = create_mg_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed.");

  ret_code =
    cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed.");

  d_start_view = cugraph_type_erased_device_array_view(d_start);

  ret_code = cugraph_type_erased_device_array_view_copy_from_host(
    handle, d_start_view, (byte_t*)h_start, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed.");

  ret_code =
    cugraph_node2vec_random_walks(handle, graph, d_start_view, max_depth, p, q, &result, &ret_error);

#if 1
  TEST_ASSERT(
    test_ret_value, ret_code != CUGRAPH_SUCCESS, "node2vec_random_walks should have failed");
#else
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "node2vec_random_walks failed.");

  cugraph_type_erased_device_array_view_t* verts;
  cugraph_type_erased_device_array_view_t* wgts;

  verts = cugraph_random_walk_result_get_paths(result);
  wgts  = cugraph_random_walk_result_get_weights(result);

  size_t verts_size = cugraph_type_erased_device_array_view_size(verts);
  size_t wgts_size  = cugraph_type_erased_device_array_view_size(wgts);

  vertex_t h_result_verts[verts_size];
  weight_t h_result_wgts[wgts_size];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_verts, verts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_result_wgts, wgts, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  // NOTE: The C++ tester does a more thorough validation.  For our purposes
  //       here we will do a simpler validation, merely checking that all edges
  //       are actually part of the graph
  weight_t M[num_vertices][num_vertices];

  for (int i = 0; i < num_vertices; ++i)
    for (int j = 0; j < num_vertices; ++j)
      M[i][j] = -1;

  for (int i = 0; i < num_edges; ++i)
    M[h_src[i]][h_dst[i]] = h_wgt[i];

  TEST_ASSERT(test_ret_value,
              cugraph_random_walk_result_get_max_path_length(result) == max_depth,
              "path length does not match");

  for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                M[h_start[i]][h_result_verts[i * (max_depth + 1)]] == h_result_wgts[i * max_depth],
                "node2vec_random_walks got edge that doesn't exist");
    for (size_t j = 1; j < cugraph_random_walk_result_get_max_path_length(result); ++j)
      TEST_ASSERT(
        test_ret_value,
        M[h_result_verts[i * (max_depth + 1) + j - 1]][h_result_verts[i * (max_depth + 1) + j]] ==
          h_result_wgts[i * max_depth + j - 1],
        "node2vec_random_walks got edge that doesn't exist");
  }

  cugraph_random_walk_result_free(result);
#endif

  cugraph_mg_graph_free(graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_uniform_random_walks(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;
  size_t num_starts   = 2;
  size_t max_depth    = 3;

  vertex_t src[]   = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[]   = {1, 3, 4, 0, 1, 3, 5, 5};
  weight_t wgt[]   = {0, 1, 2, 3, 4, 5, 6, 7};
  vertex_t start[] = {2, 2};

  return generic_uniform_random_walks_test(
    handle, src, dst, wgt, num_vertices, num_edges, start, num_starts, max_depth, FALSE);
}

int test_biased_random_walks(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;
  size_t num_starts   = 2;

  vertex_t src[]   = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[]   = {1, 3, 4, 0, 1, 3, 5, 5};
  weight_t wgt[]   = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
  vertex_t start[] = {2, 2};

  return generic_biased_random_walks_test(
    handle, src, dst, wgt, num_vertices, num_edges, start, num_starts, FALSE, FALSE);
}

int test_node2vec_random_walks(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;
  size_t num_starts   = 2;

  vertex_t src[]   = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[]   = {1, 3, 4, 0, 1, 3, 5, 5};
  weight_t wgt[]   = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
  vertex_t start[] = {2, 2};

  weight_t p = 5;
  weight_t q = 8;

  return generic_node2vec_random_walks_test(
    handle, src, dst, wgt, num_vertices, num_edges, start, num_starts, p, q, FALSE, FALSE);
}

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_uniform_random_walks, handle);
  result |= RUN_MG_TEST(test_biased_random_walks, handle);
  result |= RUN_MG_TEST(test_node2vec_random_walks, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 15,771
avg_line_length: 37.374696
max_line_length: 100
extension_type: c
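The random-walk tests above validate returned paths against a dense weight matrix M, where M[u][v] holds the edge weight and -1 marks a non-edge. The same check in isolation, for a single step of a path (an illustrative helper under that convention, not part of the test file):

#include <stddef.h>

typedef float weight_t;

/* Returns nonzero when (src, dst) is an edge of the graph and its weight
 * matches w. M uses -1 as the "no edge" sentinel, as in the tests above. */
static int step_is_valid_edge(size_t num_vertices,
                              const weight_t M[num_vertices][num_vertices],
                              int src,
                              int dst,
                              weight_t w)
{
  return (M[src][dst] >= 0) && (M[src][dst] == w);
}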
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_similarity_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_similarity_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/array.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

typedef enum { JACCARD, SORENSEN, OVERLAP } similarity_t;

int generic_similarity_test(const cugraph_resource_handle_t* handle,
                            vertex_t* h_src,
                            vertex_t* h_dst,
                            weight_t* h_wgt,
                            vertex_t* h_first,
                            vertex_t* h_second,
                            weight_t* h_result,
                            size_t num_vertices,
                            size_t num_edges,
                            size_t num_pairs,
                            bool_t store_transposed,
                            bool_t use_weight,
                            similarity_t test_type)
{
  int test_ret_value        = 0;
  data_type_id_t vertex_tid = INT32;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* graph                           = NULL;
  cugraph_similarity_result_t* result              = NULL;
  cugraph_vertex_pairs_t* vertex_pairs             = NULL;
  cugraph_type_erased_device_array_t* v1           = NULL;
  cugraph_type_erased_device_array_t* v2           = NULL;
  cugraph_type_erased_device_array_view_t* v1_view = NULL;
  cugraph_type_erased_device_array_view_t* v2_view = NULL;

  ret_code = create_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, TRUE, &graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed.");
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));

  if (cugraph_resource_handle_get_rank(handle) != 0) { num_pairs = 0; }

  ret_code =
    cugraph_type_erased_device_array_create(handle, num_pairs, vertex_tid, &v1, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "v1 create failed.");

  ret_code =
    cugraph_type_erased_device_array_create(handle, num_pairs, vertex_tid, &v2, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "v2 create failed.");

  v1_view = cugraph_type_erased_device_array_view(v1);
  v2_view = cugraph_type_erased_device_array_view(v2);

  ret_code = cugraph_type_erased_device_array_view_copy_from_host(
    handle, v1_view, (byte_t*)h_first, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "h_first copy_from_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_from_host(
    handle, v2_view, (byte_t*)h_second, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "h_second copy_from_host failed.");

  ret_code =
    cugraph_create_vertex_pairs(handle, graph, v1_view, v2_view, &vertex_pairs, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create vertex pairs failed.");

  switch (test_type) {
    case JACCARD:
      ret_code = cugraph_jaccard_coefficients(
        handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error);
      break;
    case SORENSEN:
      ret_code = cugraph_sorensen_coefficients(
        handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error);
      break;
    case OVERLAP:
      ret_code = cugraph_overlap_coefficients(
        handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error);
      break;
  }

  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph similarity failed.");

  cugraph_type_erased_device_array_view_t* similarity_coefficient;

  similarity_coefficient = cugraph_similarity_result_get_similarity(result);

  weight_t h_similarity_coefficient[num_pairs];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_similarity_coefficient, similarity_coefficient, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  for (int i = 0; (i < num_pairs) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                nearlyEqual(h_similarity_coefficient[i], h_result[i], 0.001),
                "similarity results don't match");
  }

  if (result != NULL) cugraph_similarity_result_free(result);
  if (vertex_pairs != NULL) cugraph_vertex_pairs_free(vertex_pairs);
  cugraph_mg_graph_free(graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_jaccard(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {0.2, 0.666667, 0.333333, 0.4, 0.166667, 0.5, 0.2, 0.25, 0.25, 0.666667};

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 FALSE,
                                 JACCARD);
}

int test_weighted_jaccard(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};  // TODO: Fill in

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 TRUE,
                                 JACCARD);
}

int test_sorensen(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {
    0.333333, 0.8, 0.5, 0.571429, 0.285714, 0.666667, 0.333333, 0.4, 0.4, 0.8};

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 FALSE,
                                 SORENSEN);
}

int test_weighted_sorensen(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};  // TODO: Fill in

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 TRUE,
                                 SORENSEN);
}

int test_overlap(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {0.5, 1, 0.5, 0.666667, 0.333333, 1, 0.333333, 0.5, 0.5, 1};

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 FALSE,
                                 OVERLAP);
}

int test_weighted_overlap(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_pairs    = 10;

  vertex_t h_src[]    = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[]    = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_first[]  = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3};
  vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4};
  weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};  // TODO: Fill in

  return generic_similarity_test(handle,
                                 h_src,
                                 h_dst,
                                 h_wgt,
                                 h_first,
                                 h_second,
                                 h_result,
                                 num_vertices,
                                 num_edges,
                                 num_pairs,
                                 FALSE,
                                 TRUE,
                                 OVERLAP);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_jaccard, handle);
  result |= RUN_MG_TEST(test_sorensen, handle);
  result |= RUN_MG_TEST(test_overlap, handle);
  // result |= RUN_MG_TEST(test_weighted_jaccard, handle);
  // result |= RUN_MG_TEST(test_weighted_sorensen, handle);
  // result |= RUN_MG_TEST(test_weighted_overlap, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 12,594
avg_line_length: 38.359375
max_line_length: 100
extension_type: c
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_sssp_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_sssp_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <float.h>
#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;

const float EPSILON = 0.001;

int generic_sssp_test(const cugraph_resource_handle_t* p_handle,
                      vertex_t* h_src,
                      vertex_t* h_dst,
                      float* h_wgt,
                      vertex_t source,
                      float const* expected_distances,
                      vertex_t const* expected_predecessors,
                      size_t num_vertices,
                      size_t num_edges,
                      float cutoff,
                      bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph         = NULL;
  cugraph_paths_result_t* p_result = NULL;

  ret_code = create_mg_test_graph(
    p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &p_graph, &ret_error);

  ret_code = cugraph_sssp(p_handle, p_graph, source, cutoff, TRUE, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_sssp failed.");

  cugraph_type_erased_device_array_view_t* vertices;
  cugraph_type_erased_device_array_view_t* distances;
  cugraph_type_erased_device_array_view_t* predecessors;

  vertices     = cugraph_paths_result_get_vertices(p_result);
  distances    = cugraph_paths_result_get_distances(p_result);
  predecessors = cugraph_paths_result_get_predecessors(p_result);

  vertex_t h_vertices[num_vertices];
  float h_distances[num_vertices];
  vertex_t h_predecessors[num_vertices];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_vertices, vertices, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_distances, distances, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_predecessors, predecessors, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

  for (int i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                nearlyEqual(expected_distances[h_vertices[i]], h_distances[i], EPSILON),
                "sssp distances don't match");

    TEST_ASSERT(test_ret_value,
                expected_predecessors[h_vertices[i]] == h_predecessors[i],
                "sssp predecessors don't match");
  }

  cugraph_type_erased_device_array_view_free(vertices);
  cugraph_type_erased_device_array_view_free(distances);
  cugraph_type_erased_device_array_view_free(predecessors);
  cugraph_paths_result_free(p_result);
  cugraph_sg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int generic_sssp_test_double(const cugraph_resource_handle_t* p_handle,
                             vertex_t* h_src,
                             vertex_t* h_dst,
                             double* h_wgt,
                             vertex_t source,
                             double const* expected_distances,
                             vertex_t const* expected_predecessors,
                             size_t num_vertices,
                             size_t num_edges,
                             double cutoff,
                             bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph         = NULL;
  cugraph_paths_result_t* p_result = NULL;

  ret_code = create_mg_test_graph_double(
    p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &p_graph, &ret_error);

  ret_code = cugraph_sssp(p_handle, p_graph, source, cutoff, TRUE, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_sssp failed.");

  cugraph_type_erased_device_array_view_t* vertices;
  cugraph_type_erased_device_array_view_t* distances;
  cugraph_type_erased_device_array_view_t* predecessors;

  vertices     = cugraph_paths_result_get_vertices(p_result);
  distances    = cugraph_paths_result_get_distances(p_result);
  predecessors = cugraph_paths_result_get_predecessors(p_result);

  vertex_t h_vertices[num_vertices];
  double h_distances[num_vertices];
  vertex_t h_predecessors[num_vertices];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_vertices, vertices, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_distances, distances, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    p_handle, (byte_t*)h_predecessors, predecessors, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

  for (int i = 0; (i < num_local_vertices) && (test_ret_value == 0); ++i) {
    TEST_ASSERT(test_ret_value,
                nearlyEqualDouble(expected_distances[h_vertices[i]], h_distances[i], EPSILON),
                "sssp distances don't match");

    TEST_ASSERT(test_ret_value,
                expected_predecessors[h_vertices[i]] == h_predecessors[i],
                "sssp predecessors don't match");
  }

  cugraph_type_erased_device_array_view_free(vertices);
  cugraph_type_erased_device_array_view_free(distances);
  cugraph_type_erased_device_array_view_free(predecessors);
  cugraph_paths_result_free(p_result);
  cugraph_sg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_sssp(const cugraph_resource_handle_t* p_handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;

  vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5};
  float wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};

  float expected_distances[]       = {0.0f, 0.1f, FLT_MAX, 2.2f, 1.2f, 4.4f};
  vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4};

  // SSSP wants store_transposed = FALSE
  return generic_sssp_test(p_handle,
                           src,
                           dst,
                           wgt,
                           0,
                           expected_distances,
                           expected_predecessors,
                           num_vertices,
                           num_edges,
                           10,
                           FALSE);
}

int test_sssp_with_transpose(const cugraph_resource_handle_t* p_handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;

  vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5};
  float wgt[]    = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};

  float expected_distances[]       = {0.0f, 0.1f, FLT_MAX, 2.2f, 1.2f, 4.4f};
  vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4};

  // SSSP wants store_transposed = FALSE
  // This call will force cugraph_sssp to transpose the graph
  return generic_sssp_test(p_handle,
                           src,
                           dst,
                           wgt,
                           0,
                           expected_distances,
                           expected_predecessors,
                           num_vertices,
                           num_edges,
                           10,
                           TRUE);
}

int test_sssp_with_transpose_double(const cugraph_resource_handle_t* p_handle)
{
  size_t num_edges    = 8;
  size_t num_vertices = 6;

  vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4};
  vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5};
  double wgt[]   = {0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2};

  double expected_distances[]      = {0.0, 0.1, DBL_MAX, 2.2, 1.2, 4.4};
  vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4};

  // SSSP wants store_transposed = FALSE
  // This call will force cugraph_sssp to transpose the graph
  return generic_sssp_test_double(p_handle,
                                  src,
                                  dst,
                                  wgt,
                                  0,
                                  expected_distances,
                                  expected_predecessors,
                                  num_vertices,
                                  num_edges,
                                  10,
                                  TRUE);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_sssp, handle);
  result |= RUN_MG_TEST(test_sssp_with_transpose, handle);
  result |= RUN_MG_TEST(test_sssp_with_transpose_double, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 10,487
avg_line_length: 37.844444
max_line_length: 97
extension_type: c
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_strongly_connected_components_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_strongly_connected_components_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

int generic_scc_test(const cugraph_resource_handle_t* handle,
                     vertex_t* h_src,
                     vertex_t* h_dst,
                     weight_t* h_wgt,
                     vertex_t* h_result,
                     size_t num_vertices,
                     size_t num_edges,
                     bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph            = NULL;
  cugraph_labeling_result_t* p_result = NULL;

  ret_code = create_mg_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, &p_graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_mg_test_graph failed.");

  ret_code = cugraph_strongly_connected_components(handle, p_graph, FALSE, &p_result, &ret_error);
  TEST_ASSERT(
    test_ret_value, ret_code == CUGRAPH_NOT_IMPLEMENTED, "SCC should not be implemented, but is");

#if 0
  // FIXME: Actual implementation will be something like this
  TEST_ASSERT(
    test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_strongly_connected_components failed.");

  // NOTE: Because we get back vertex ids and components, we can simply compare
  //       the returned values with the expected results for the entire
  //       graph.  Each GPU will have a subset of the total vertices, so
  //       they will do a subset of the comparisons.
  cugraph_type_erased_device_array_view_t* vertices;
  cugraph_type_erased_device_array_view_t* components;

  vertices   = cugraph_labeling_result_get_vertices(p_result);
  components = cugraph_labeling_result_get_labels(p_result);

  size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices);

  vertex_t h_vertices[num_local_vertices];
  vertex_t h_components[num_local_vertices];

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_vertices, vertices, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  ret_code = cugraph_type_erased_device_array_view_copy_to_host(
    handle, (byte_t*)h_components, components, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

  vertex_t component_check[num_vertices];
  for (vertex_t i = 0; i < num_vertices; ++i) {
    component_check[i] = num_vertices;
  }

  vertex_t num_errors = 0;
  for (vertex_t i = 0; i < num_local_vertices; ++i) {
    if (component_check[h_components[i]] == num_vertices) {
      component_check[h_components[i]] = h_result[h_vertices[i]];
    } else if (component_check[h_components[i]] != h_result[h_vertices[i]]) {
      ++num_errors;
    }
  }

  TEST_ASSERT(test_ret_value, num_errors == 0, "strongly connected components results don't match");

  cugraph_type_erased_device_array_view_free(components);
  cugraph_type_erased_device_array_view_free(vertices);
  cugraph_labeling_result_free(p_result);
#endif

  cugraph_mg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_strongly_connected_components(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 12;

  vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10};
  vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11};
  weight_t h_wgt[] = {
    1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
  vertex_t h_result[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1};

  // SCC wants store_transposed = FALSE
  return generic_scc_test(handle, h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_strongly_connected_components, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
file_length: 4,944
avg_line_length: 34.57554
max_line_length: 100
extension_type: c
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_test_utils.h
file: cugraph-branch-23.08/cpp/tests/c_api/mg_test_utils.h
code:
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include "c_test_utils.h"

#include <mpi.h>
#include <stdlib.h>

#define C_MPI_TRY(call)                                                       \
  do {                                                                        \
    int status = call;                                                        \
    if (MPI_SUCCESS != status) {                                              \
      int mpi_error_string_length = 0;                                        \
      char mpi_error_string[MPI_MAX_ERROR_STRING];                            \
      MPI_Error_string(status, mpi_error_string, &mpi_error_string_length);   \
      printf("MPI call='%s' at file=%s line=%d failed with %s ",              \
             #call,                                                           \
             __FILE__,                                                        \
             __LINE__,                                                        \
             mpi_error_string);                                               \
      exit(1);                                                                \
    }                                                                         \
  } while (0)

#define C_CUDA_TRY(call)                        \
  do {                                          \
    cudaError_t const status = call;            \
    if (status != cudaSuccess) {                \
      cudaGetLastError();                       \
      printf(                                   \
        "CUDA error encountered at: "           \
        "call='%s', Reason=%s:%s",              \
        #call,                                  \
        cudaGetErrorName(status),               \
        cudaGetErrorString(status));            \
      exit(1);                                  \
    }                                           \
  } while (0)

#ifdef __cplusplus
extern "C" {
#endif

int run_mg_test(int (*test)(const cugraph_resource_handle_t*),
                const char* test_name,
                const cugraph_resource_handle_t* handle);

#define RUN_MG_TEST(test_name, handle) run_mg_test(test_name, #test_name, handle)

void* create_mg_raft_handle(int argc, char** argv);
void free_mg_raft_handle(void* raft_handle);

int create_mg_test_graph(const cugraph_resource_handle_t* p_handle,
                         int32_t* h_src,
                         int32_t* h_dst,
                         float* h_wgt,
                         size_t num_edges,
                         bool_t store_transposed,
                         bool_t is_symmetric,
                         cugraph_graph_t** p_graph,
                         cugraph_error_t** ret_error);

int create_mg_test_graph_double(const cugraph_resource_handle_t* p_handle,
                                int32_t* h_src,
                                int32_t* h_dst,
                                double* h_wgt,
                                size_t num_edges,
                                bool_t store_transposed,
                                bool_t is_symmetric,
                                cugraph_graph_t** p_graph,
                                cugraph_error_t** ret_error);

int create_mg_test_graph_with_edge_ids(const cugraph_resource_handle_t* p_handle,
                                       int32_t* h_src,
                                       int32_t* h_dst,
                                       int32_t* h_idx,
                                       size_t num_edges,
                                       bool_t store_transposed,
                                       bool_t is_symmetric,
                                       cugraph_graph_t** p_graph,
                                       cugraph_error_t** ret_error);

int create_mg_test_graph_with_properties(const cugraph_resource_handle_t* p_handle,
                                         int32_t* h_src,
                                         int32_t* h_dst,
                                         int32_t* h_idx,
                                         int32_t* h_type,
                                         float* h_wgt,
                                         size_t num_edges,
                                         bool_t store_transposed,
                                         bool_t is_symmetric,
                                         cugraph_graph_t** p_graph,
                                         cugraph_error_t** ret_error);

#ifdef __cplusplus
}
#endif
file_length: 4,937
avg_line_length: 43.089286
max_line_length: 83
extension_type: h
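mg_test_utils.h above wraps MPI and CUDA calls in exit-on-error macros and routes every test through run_mg_test via the RUN_MG_TEST macro. A minimal sketch of the calling convention, assuming (as the test files in this dataset imply) that create_mg_raft_handle initializes MPI internally; test_nothing is a hypothetical placeholder:

#include "mg_test_utils.h"

#include <mpi.h>

/* Hypothetical no-op test, just to show the signature run_mg_test expects. */
static int test_nothing(const cugraph_resource_handle_t* handle)
{
  (void)handle;
  return 0;
}

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  /* C_MPI_TRY prints the failing call, file, and line, then exits. */
  C_MPI_TRY(MPI_Barrier(MPI_COMM_WORLD));

  int result = 0;
  /* Expands to run_mg_test(test_nothing, "test_nothing", handle). */
  result |= RUN_MG_TEST(test_nothing, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);
  return result;
}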
repo: cugraph-branch-23.08/cpp/tests/c_api/mg_triangle_count_test.c
file: cugraph-branch-23.08/cpp/tests/c_api/mg_triangle_count_test.c
code:
/*
 * Copyright (c) 2022, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mg_test_utils.h" /* RUN_TEST */

#include <cugraph_c/algorithms.h>
#include <cugraph_c/graph.h>

#include <math.h>

typedef int32_t vertex_t;
typedef int32_t edge_t;
typedef float weight_t;

int generic_triangle_count_test(const cugraph_resource_handle_t* handle,
                                vertex_t* h_src,
                                vertex_t* h_dst,
                                weight_t* h_wgt,
                                vertex_t* h_verts,
                                edge_t* h_result,
                                size_t num_vertices,
                                size_t num_edges,
                                size_t num_results,
                                bool_t store_transposed)
{
  int test_ret_value = 0;

  cugraph_error_code_t ret_code = CUGRAPH_SUCCESS;
  cugraph_error_t* ret_error;

  cugraph_graph_t* p_graph                  = NULL;
  cugraph_triangle_count_result_t* p_result = NULL;

  cugraph_type_erased_device_array_t* p_start           = NULL;
  cugraph_type_erased_device_array_view_t* p_start_view = NULL;

  int rank = cugraph_resource_handle_get_rank(handle);

  ret_code = create_mg_test_graph(
    handle, h_src, h_dst, h_wgt, num_edges, store_transposed, TRUE, &p_graph, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_mg_test_graph failed.");

  if (h_verts != NULL) {
    if (rank == 0) {
      ret_code =
        cugraph_type_erased_device_array_create(handle, num_results, INT32, &p_start, &ret_error);
      TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "p_start create failed.");

      p_start_view = cugraph_type_erased_device_array_view(p_start);

      ret_code = cugraph_type_erased_device_array_view_copy_from_host(
        handle, p_start_view, (byte_t*)h_verts, &ret_error);
      TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src copy_from_host failed.");
    } else {
      p_start_view = cugraph_type_erased_device_array_view_create(NULL, 0, INT32);
    }
  }

  ret_code = cugraph_triangle_count(handle, p_graph, p_start_view, FALSE, &p_result, &ret_error);
  TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error));
  TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_triangle_count failed.");

  if (test_ret_value == 0) {
    cugraph_type_erased_device_array_view_t* vertices;
    cugraph_type_erased_device_array_view_t* counts;

    vertices = cugraph_triangle_count_result_get_vertices(p_result);
    counts   = cugraph_triangle_count_result_get_counts(p_result);

    vertex_t num_local_results = cugraph_type_erased_device_array_view_size(vertices);

    vertex_t h_vertices[num_local_results];
    edge_t h_counts[num_local_results];

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      handle, (byte_t*)h_vertices, vertices, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    ret_code = cugraph_type_erased_device_array_view_copy_to_host(
      handle, (byte_t*)h_counts, counts, &ret_error);
    TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed.");

    for (int i = 0; (i < num_local_results) && (test_ret_value == 0); ++i) {
      for (int j = 0; j < num_results; ++j) {
        if (h_vertices[i] == h_verts[j]) {
          TEST_ASSERT(test_ret_value, h_counts[i] == h_result[j], "counts results don't match");
        }
      }
    }

    cugraph_triangle_count_result_free(p_result);
  }

  cugraph_mg_graph_free(p_graph);
  cugraph_error_free(ret_error);

  return test_ret_value;
}

int test_triangle_count(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 16;
  size_t num_vertices = 6;
  size_t num_results  = 4;

  vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5};
  vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4};
  weight_t h_wgt[] = {
    0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f};
  vertex_t h_verts[] = {0, 1, 2, 4};
  edge_t h_result[]  = {1, 2, 2, 0};

  // Triangle Count wants store_transposed = FALSE
  return generic_triangle_count_test(
    handle, h_src, h_dst, h_wgt, h_verts, h_result, num_vertices, num_edges, num_results, FALSE);
}

int test_triangle_count_dolphins(const cugraph_resource_handle_t* handle)
{
  size_t num_edges    = 318;
  size_t num_vertices = 62;

  vertex_t h_src[] = {
    10, 14, 15, 40, 42, 47, 17, 19, 26, 27, 28, 36, 41, 54, 10, 42, 44, 61, 8,  14,
    59, 51, 9,  13, 56, 57, 9,  13, 17, 54, 56, 57, 19, 27, 30, 40, 54, 20, 28, 37,
    45, 59, 13, 17, 32, 41, 57, 29, 42, 47, 51, 33, 17, 32, 41, 54, 57, 16, 24, 33,
    34, 37, 38, 40, 43, 50, 52, 18, 24, 40, 45, 55, 59, 20, 33, 37, 38, 50, 22, 25,
    27, 31, 57, 20, 21, 24, 29, 45, 51, 30, 54, 28, 36, 38, 44, 47, 50, 29, 33, 37,
    45, 51, 36, 45, 51, 29, 45, 51, 26, 27, 27, 30, 47, 35, 43, 45, 51, 52, 42, 47,
    60, 34, 37, 38, 40, 43, 50, 37, 44, 49, 37, 39, 40, 59, 40, 43, 45, 61, 43, 44,
    52, 58, 57, 52, 54, 57, 47, 50, 46, 53, 50, 51, 59, 49, 57, 51, 55, 61, 57,
    0,  0,  0,  0,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  2,  2,  2,  2,  3,  3,
    3,  4,  5,  5,  5,  5,  6,  6,  6,  6,  6,  6,  7,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  9,  9,  9,  9,  9,  10, 10, 10, 11, 12, 13, 13, 13, 13, 13, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17,
    17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21,
    21, 21, 23, 23, 23, 24, 24, 24, 25, 25, 26, 28, 28, 29, 29, 29, 29, 29, 30, 30,
    32, 33, 33, 33, 33, 33, 33, 34, 34, 34, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38,
    38, 38, 39, 40, 41, 41, 42, 42, 43, 43, 45, 45, 45, 46, 48, 50, 51, 53, 54};

  vertex_t h_dst[] = {
    0,  0,  0,  0,  0,  0,  1,  1,  1,  1,  1,  1,  1,  1,  2,  2,  2,  2,  3,  3,
    3,  4,  5,  5,  5,  5,  6,  6,  6,  6,  6,  6,  7,  7,  7,  7,  7,  8,  8,  8,
    8,  8,  9,  9,  9,  9,  9,  10, 10, 10, 11, 12, 13, 13, 13, 13, 13, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17,
    17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21,
    21, 21, 23, 23, 23, 24, 24, 24, 25, 25, 26, 28, 28, 29, 29, 29, 29, 29, 30, 30,
    32, 33, 33, 33, 33, 33, 33, 34, 34, 34, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38,
    38, 38, 39, 40, 41, 41, 42, 42, 43, 43, 45, 45, 45, 46, 48, 50, 51, 53, 54,
    10, 14, 15, 40, 42, 47, 17, 19, 26, 27, 28, 36, 41, 54, 10, 42, 44, 61, 8,  14,
    59, 51, 9,  13, 56, 57, 9,  13, 17, 54, 56, 57, 19, 27, 30, 40, 54, 20, 28, 37,
    45, 59, 13, 17, 32, 41, 57, 29, 42, 47, 51, 33, 17, 32, 41, 54, 57, 16, 24, 33,
    34, 37, 38, 40, 43, 50, 52, 18, 24, 40, 45, 55, 59, 20, 33, 37, 38, 50, 22, 25,
    27, 31, 57, 20, 21, 24, 29, 45, 51, 30, 54, 28, 36, 38, 44, 47, 50, 29, 33, 37,
    45, 51, 36, 45, 51, 29, 45, 51, 26, 27, 27, 30, 47, 35, 43, 45, 51, 52, 42, 47,
    60, 34, 37, 38, 40, 43, 50, 37, 44, 49, 37, 39, 40, 59, 40, 43, 45, 61, 43, 44,
    52, 58, 57, 52, 54, 57, 47, 50, 46, 53, 50, 51, 59, 49, 57, 51, 55, 61, 57};

  weight_t h_wgt[] = {
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
    1.0f, 1.0f, 1.0f};

  vertex_t h_verts[] = {11, 48, 0};
  edge_t h_result[]  = {0, 0, 5};
  size_t num_results = 3;

  // Triangle Count wants store_transposed = FALSE
  return generic_triangle_count_test(
    handle, h_src, h_dst, h_wgt, h_verts, h_result, num_vertices, num_edges, num_results, FALSE);
}

/******************************************************************************/

int main(int argc, char** argv)
{
  void* raft_handle                 = create_mg_raft_handle(argc, argv);
  cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle);

  int result = 0;
  result |= RUN_MG_TEST(test_triangle_count, handle);
  result |= RUN_MG_TEST(test_triangle_count_dolphins, handle);

  cugraph_free_resource_handle(handle);
  free_mg_raft_handle(raft_handle);

  return result;
}
10550
47.847222
100
c
cugraph-branch-23.08/cpp/tests/c_api/mg_two_hop_neighbors_test.c
cugraph-branch-23.08/cpp/tests/c_api/mg_two_hop_neighbors_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mg_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/array.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_two_hop_nbr_test(const cugraph_resource_handle_t* resource_handle, vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_sources, vertex_t* h_result_v1, vertex_t* h_result_v2, size_t num_vertices, size_t num_edges, size_t num_sources, size_t num_result_pairs, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_graph_t* graph = NULL; cugraph_type_erased_device_array_t* start_vertices = NULL; cugraph_type_erased_device_array_view_t* start_vertices_view = NULL; cugraph_vertex_pairs_t* result = NULL; int rank = cugraph_resource_handle_get_rank(resource_handle); ret_code = create_mg_test_graph( resource_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, TRUE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_mg_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); if (num_sources > 0) { if (rank == 0) { ret_code = cugraph_type_erased_device_array_create( resource_handle, num_sources, INT32, &start_vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "p_sources create failed."); start_vertices_view = cugraph_type_erased_device_array_view(start_vertices); ret_code = cugraph_type_erased_device_array_view_copy_from_host( resource_handle, start_vertices_view, (byte_t*)h_sources, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src copy_from_host failed."); } else { start_vertices_view = cugraph_type_erased_device_array_view_create(NULL, 0, INT32); } } ret_code = cugraph_two_hop_neighbors( resource_handle, graph, start_vertices_view, FALSE, &result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_two_hop_neighbors failed."); cugraph_type_erased_device_array_view_t* v1; cugraph_type_erased_device_array_view_t* v2; v1 = cugraph_vertex_pairs_get_first(result); v2 = cugraph_vertex_pairs_get_second(result); size_t number_of_pairs = cugraph_type_erased_device_array_view_size(v1); vertex_t h_v1[number_of_pairs]; vertex_t h_v2[number_of_pairs]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( resource_handle, (byte_t*)h_v1, v1, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( resource_handle, (byte_t*)h_v2, v2, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); bool_t M[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) M[i][j] = FALSE; for (int i = 0; i < num_result_pairs; ++i) M[h_result_v1[i]][h_result_v2[i]] = TRUE; for (int i = 0; (i < number_of_pairs) && (test_ret_value == 0); ++i) {
TEST_ASSERT(test_ret_value, M[h_v1[i]][h_v2[i]], "result not found"); } cugraph_vertex_pairs_free(result); cugraph_type_erased_device_array_view_free(start_vertices_view); cugraph_type_erased_device_array_free(start_vertices); cugraph_mg_graph_free(graph); cugraph_error_free(ret_error); return test_ret_value; } int test_two_hop_nbr_all(const cugraph_resource_handle_t* handle) { size_t num_edges = 22; size_t num_vertices = 7; size_t num_sources = 0; size_t num_result_pairs = 43; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5, 3, 1, 4, 5, 5, 6}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 3, 1, 6, 5}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result_v1[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6}; vertex_t h_result_v2[] = {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 1, 3, 4, 6}; return generic_two_hop_nbr_test(handle, h_src, h_dst, h_wgt, NULL, h_result_v1, h_result_v2, num_vertices, num_edges, num_sources, num_result_pairs, FALSE); } int test_two_hop_nbr_one(const cugraph_resource_handle_t* handle) { size_t num_edges = 22; size_t num_vertices = 7; size_t num_sources = 1; size_t num_result_pairs = 6; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5, 3, 1, 4, 5, 5, 6}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 3, 1, 6, 5}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_sources[] = {0}; vertex_t h_result_v1[] = {0, 0, 0, 0, 0, 0}; vertex_t h_result_v2[] = {0, 1, 2, 3, 4, 5}; return generic_two_hop_nbr_test(handle, h_src, h_dst, h_wgt, h_sources, h_result_v1, h_result_v2, num_vertices, num_edges, num_sources, num_result_pairs, FALSE); } /******************************************************************************/ int main(int argc, char** argv) { void* raft_handle = create_mg_raft_handle(argc, argv); cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle); int result = 0; result |= RUN_MG_TEST(test_two_hop_nbr_all, handle); result |= RUN_MG_TEST(test_two_hop_nbr_one, handle); cugraph_free_resource_handle(handle); free_mg_raft_handle(raft_handle); return result; }
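/*
 * A hedged aside: the expected (v1, v2) pairs above can be regenerated with a
 * small host-side adjacency-matrix sketch.  two_hop_pairs_reference is a
 * hypothetical helper (not a cugraph API) and NV is assumed to match the
 * tests' num_vertices; "two-hop neighbor" is taken the way the test data
 * defines it: v2 is reachable from v1 by exactly two directed edges,
 * including v1 itself when a round trip exists.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NV 7 /* num_vertices in the tests above */

static void two_hop_pairs_reference(const int32_t* src, const int32_t* dst, size_t m)
{
  int adj[NV][NV] = {{0}};
  int two_hop[NV][NV] = {{0}};
  for (size_t e = 0; e < m; ++e) adj[src[e]][dst[e]] = 1;
  /* mark (u, w) whenever some k gives edges u->k and k->w */
  for (int u = 0; u < NV; ++u)
    for (int k = 0; k < NV; ++k)
      if (adj[u][k])
        for (int w = 0; w < NV; ++w)
          if (adj[k][w]) two_hop[u][w] = 1;
  for (int u = 0; u < NV; ++u)
    for (int w = 0; w < NV; ++w)
      if (two_hop[u][w]) printf("(%d, %d)\n", u, w);
}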
7587
37.517766
97
c
cugraph-branch-23.08/cpp/tests/c_api/mg_weakly_connected_components_test.c
cugraph-branch-23.08/cpp/tests/c_api/mg_weakly_connected_components_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mg_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_wcc_test(const cugraph_resource_handle_t* handle, vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_result, size_t num_vertices, size_t num_edges, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_graph_t* p_graph = NULL; cugraph_labeling_result_t* p_result = NULL; ret_code = create_mg_test_graph( handle, h_src, h_dst, h_wgt, num_edges, store_transposed, TRUE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_mg_test_graph failed."); ret_code = cugraph_weakly_connected_components(handle, p_graph, FALSE, &p_result, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_weakly_connected_components failed."); // NOTE: Because we get back vertex ids and components, we can simply compare // the returned values with the expected results for the entire // graph. Each GPU will have a subset of the total vertices, so // they will do a subset of the comparisons. 
cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* components; vertices = cugraph_labeling_result_get_vertices(p_result); components = cugraph_labeling_result_get_labels(p_result); size_t num_local_vertices = cugraph_type_erased_device_array_view_size(vertices); vertex_t h_vertices[num_local_vertices]; vertex_t h_components[num_local_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_components, components, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); vertex_t component_check[num_vertices]; for (vertex_t i = 0; i < num_vertices; ++i) { component_check[i] = num_vertices; } vertex_t num_errors = 0; for (vertex_t i = 0; i < num_local_vertices; ++i) { if (component_check[h_components[i]] == num_vertices) { component_check[h_components[i]] = h_result[h_vertices[i]]; } else if (component_check[h_components[i]] != h_result[h_vertices[i]]) { ++num_errors; } } TEST_ASSERT(test_ret_value, num_errors == 0, "weakly connected components results don't match"); cugraph_type_erased_device_array_view_free(components); cugraph_type_erased_device_array_view_free(vertices); cugraph_labeling_result_free(p_result); cugraph_mg_graph_free(p_graph); cugraph_error_free(ret_error); return test_ret_value; } int test_weakly_connected_components(const cugraph_resource_handle_t* handle) { size_t num_edges = 32; size_t num_vertices = 12; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10, 1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11, 0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}; // WCC wants store_transposed = FALSE return generic_wcc_test(handle, h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE); } /******************************************************************************/ int main(int argc, char** argv) { void* raft_handle = create_mg_raft_handle(argc, argv); cugraph_resource_handle_t* handle = cugraph_create_resource_handle(raft_handle); int result = 0; result |= RUN_MG_TEST(test_weakly_connected_components, handle); cugraph_free_resource_handle(handle); free_mg_raft_handle(raft_handle); return result; }
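/*
 * A hedged aside: a host-side union-find sketch that reproduces a component
 * labeling equivalent to the expected h_result above (the test only checks
 * label consistency, not specific label values).  wcc_reference and uf_find
 * are hypothetical helpers, not part of the cugraph C API; each vertex is
 * labeled with the smallest vertex id in its weakly connected component.
 */
#include <stddef.h>
#include <stdint.h>

static int32_t uf_find(int32_t* parent, int32_t x)
{
  while (parent[x] != x) { parent[x] = parent[parent[x]]; x = parent[x]; } /* path halving */
  return x;
}

static void wcc_reference(
  const int32_t* src, const int32_t* dst, size_t m, size_t n, int32_t* labels)
{
  int32_t parent[n];
  for (size_t v = 0; v < n; ++v) parent[v] = (int32_t)v;
  for (size_t e = 0; e < m; ++e) {
    int32_t a = uf_find(parent, src[e]);
    int32_t b = uf_find(parent, dst[e]);
    if (a != b) parent[(a < b) ? b : a] = (a < b) ? a : b; /* keep the smaller root */
  }
  for (size_t v = 0; v < n; ++v) labels[v] = uf_find(parent, (int32_t)v);
}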
5030
36.266667
98
c
cugraph-branch-23.08/cpp/tests/c_api/pagerank_test.c
cugraph-branch-23.08/cpp/tests/c_api/pagerank_test.c
/* * Copyright (c) 2021-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_pagerank_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, weight_t* h_result, size_t num_vertices, size_t num_edges, bool_t store_transposed, double alpha, double epsilon, size_t max_iterations) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_centrality_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_pagerank(p_handle, p_graph, NULL, NULL, NULL, NULL, alpha, epsilon, max_iterations, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_pagerank failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* pageranks; vertices = cugraph_centrality_result_get_vertices(p_result); pageranks = cugraph_centrality_result_get_values(p_result); vertex_t h_vertices[num_vertices]; weight_t h_pageranks[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_pageranks, pageranks, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(h_result[h_vertices[i]], h_pageranks[i], 0.001), "pagerank results don't match"); } cugraph_centrality_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_pagerank_nonconverging_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, weight_t* h_result, size_t num_vertices, size_t num_edges, bool_t store_transposed, double alpha, double epsilon, size_t max_iterations) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_centrality_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, 
p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_pagerank_allow_nonconvergence(p_handle, p_graph, NULL, NULL, NULL, NULL, alpha, epsilon, max_iterations, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_pagerank failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* pageranks; vertices = cugraph_centrality_result_get_vertices(p_result); pageranks = cugraph_centrality_result_get_values(p_result); vertex_t h_vertices[num_vertices]; weight_t h_pageranks[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_pageranks, pageranks, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(h_result[h_vertices[i]], h_pageranks[i], 0.001), "pagerank results don't match"); } cugraph_centrality_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_personalized_pagerank_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, weight_t* h_result, vertex_t* h_personalization_vertices, weight_t* h_personalization_values, size_t num_vertices, size_t num_edges, size_t num_personalization_vertices, bool_t store_transposed, double alpha, double epsilon, size_t max_iterations) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_centrality_result_t* p_result = NULL; cugraph_type_erased_device_array_t* personalization_vertices = NULL; cugraph_type_erased_device_array_t* personalization_values = NULL; cugraph_type_erased_device_array_view_t* personalization_vertices_view = NULL; cugraph_type_erased_device_array_view_t* personalization_values_view = NULL; data_type_id_t vertex_tid = INT32; data_type_id_t weight_tid = FLOAT32; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_type_erased_device_array_create( p_handle, num_personalization_vertices, vertex_tid, &personalization_vertices, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_vertices create failed."); ret_code = cugraph_type_erased_device_array_create( p_handle, num_personalization_vertices, weight_tid, &personalization_values, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, 
"personalization_values create failed."); personalization_vertices_view = cugraph_type_erased_device_array_view(personalization_vertices); personalization_values_view = cugraph_type_erased_device_array_view(personalization_values); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, personalization_vertices_view, (byte_t*)h_personalization_vertices, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_vertices copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, personalization_values_view, (byte_t*)h_personalization_values, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_values copy_from_host failed."); ret_code = cugraph_personalized_pagerank(p_handle, p_graph, NULL, NULL, NULL, NULL, personalization_vertices_view, personalization_values_view, alpha, epsilon, max_iterations, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_personalized_pagerank failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_personalized_pagerank failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* pageranks; vertices = cugraph_centrality_result_get_vertices(p_result); pageranks = cugraph_centrality_result_get_values(p_result); vertex_t h_vertices[num_vertices]; weight_t h_pageranks[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_pageranks, pageranks, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(h_result[h_vertices[i]], h_pageranks[i], 0.001), "pagerank results don't match"); } cugraph_centrality_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_personalized_pagerank_nonconverging_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, weight_t* h_result, vertex_t* h_personalization_vertices, weight_t* h_personalization_values, size_t num_vertices, size_t num_edges, size_t num_personalization_vertices, bool_t store_transposed, double alpha, double epsilon, size_t max_iterations) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_centrality_result_t* p_result = NULL; cugraph_type_erased_device_array_t* personalization_vertices = NULL; cugraph_type_erased_device_array_t* personalization_values = NULL; cugraph_type_erased_device_array_view_t* personalization_vertices_view = NULL; cugraph_type_erased_device_array_view_t* personalization_values_view = NULL; data_type_id_t vertex_tid = INT32; data_type_id_t weight_tid = FLOAT32; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, 
cugraph_error_message(ret_error)); ret_code = cugraph_type_erased_device_array_create( p_handle, num_personalization_vertices, vertex_tid, &personalization_vertices, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_vertices create failed."); ret_code = cugraph_type_erased_device_array_create( p_handle, num_personalization_vertices, weight_tid, &personalization_values, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_values create failed."); personalization_vertices_view = cugraph_type_erased_device_array_view(personalization_vertices); personalization_values_view = cugraph_type_erased_device_array_view(personalization_values); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, personalization_vertices_view, (byte_t*)h_personalization_vertices, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_vertices copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, personalization_values_view, (byte_t*)h_personalization_values, &ret_error); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "personalization_values copy_from_host failed."); ret_code = cugraph_personalized_pagerank_allow_nonconvergence(p_handle, p_graph, NULL, NULL, NULL, NULL, personalization_vertices_view, personalization_values_view, alpha, epsilon, max_iterations, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_personalized_pagerank failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_personalized_pagerank failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* pageranks; vertices = cugraph_centrality_result_get_vertices(p_result); pageranks = cugraph_centrality_result_get_values(p_result); vertex_t h_vertices[num_vertices]; weight_t h_pageranks[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_pageranks, pageranks, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(h_result[h_vertices[i]], h_pageranks[i], 0.001), "pagerank results don't match"); } cugraph_centrality_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_pagerank() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; weight_t h_result[] = {0.0915528, 0.168382, 0.0656831, 0.191468, 0.120677, 0.362237}; double alpha = 0.95; double epsilon = 0.0001; size_t max_iterations = 20; // Pagerank wants store_transposed = TRUE return generic_pagerank_test( h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, TRUE, alpha, epsilon, max_iterations); } int test_pagerank_with_transpose() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; weight_t h_result[] = {0.0915528, 
0.168382, 0.0656831, 0.191468, 0.120677, 0.362237}; double alpha = 0.95; double epsilon = 0.0001; size_t max_iterations = 20; // Pagerank wants store_transposed = TRUE // This call will force cugraph_pagerank to transpose the graph // But we're passing src/dst backwards so the results will be the same return generic_pagerank_test( h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE, alpha, epsilon, max_iterations); } int test_pagerank_4() { size_t num_edges = 3; size_t num_vertices = 4; vertex_t h_src[] = {0, 1, 2}; vertex_t h_dst[] = {1, 2, 3}; weight_t h_wgt[] = {1.f, 1.f, 1.f}; weight_t h_result[] = { 0.11615584790706635f, 0.21488840878009796f, 0.29881080985069275f, 0.37014490365982056f}; double alpha = 0.85; double epsilon = 1.0e-6; size_t max_iterations = 500; return generic_pagerank_test( h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE, alpha, epsilon, max_iterations); } int test_pagerank_4_with_transpose() { size_t num_edges = 3; size_t num_vertices = 4; vertex_t h_src[] = {0, 1, 2}; vertex_t h_dst[] = {1, 2, 3}; weight_t h_wgt[] = {1.f, 1.f, 1.f}; weight_t h_result[] = { 0.11615584790706635f, 0.21488840878009796f, 0.29881080985069275f, 0.37014490365982056f}; double alpha = 0.85; double epsilon = 1.0e-6; size_t max_iterations = 500; return generic_pagerank_test( h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, TRUE, alpha, epsilon, max_iterations); } int test_pagerank_non_convergence() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; weight_t h_result[] = {0.0776471, 0.167637, 0.0639699, 0.220202, 0.140046, 0.330498}; double alpha = 0.95; double epsilon = 0.0001; size_t max_iterations = 2; // Pagerank wants store_transposed = TRUE return generic_pagerank_nonconverging_test( h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, TRUE, alpha, epsilon, max_iterations); } int test_personalized_pagerank() { size_t num_edges = 3; size_t num_vertices = 4; vertex_t h_src[] = {0, 1, 2}; vertex_t h_dst[] = {1, 2, 3}; weight_t h_wgt[] = {1.f, 1.f, 1.f}; weight_t h_result[] = {0.0559233f, 0.159381f, 0.303244f, 0.481451f}; vertex_t h_personalized_vertices[] = {0, 1, 2, 3}; weight_t h_personalized_values[] = {0.1, 0.2, 0.3, 0.4}; double alpha = 0.85; double epsilon = 1.0e-6; size_t max_iterations = 500; return generic_personalized_pagerank_test(h_src, h_dst, h_wgt, h_result, h_personalized_vertices, h_personalized_values, num_vertices, num_edges, num_vertices, FALSE, alpha, epsilon, max_iterations); } int test_personalized_pagerank_non_convergence() { size_t num_edges = 3; size_t num_vertices = 4; vertex_t h_src[] = {0, 1, 2}; vertex_t h_dst[] = {1, 2, 3}; weight_t h_wgt[] = {1.f, 1.f, 1.f}; weight_t h_result[] = { 0.03625, 0.285, 0.32125, 0.3575 }; vertex_t h_personalized_vertices[] = {0, 1, 2, 3}; weight_t h_personalized_values[] = {0.1, 0.2, 0.3, 0.4}; double alpha = 0.85; double epsilon = 1.0e-6; size_t max_iterations = 1; return generic_personalized_pagerank_nonconverging_test(h_src, h_dst, h_wgt, h_result, h_personalized_vertices, h_personalized_values, num_vertices, num_edges, num_vertices, FALSE, alpha, epsilon, max_iterations); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_pagerank); result |= RUN_TEST(test_pagerank_with_transpose); result |= RUN_TEST(test_pagerank_4); result |= 
RUN_TEST(test_pagerank_4_with_transpose); result |= RUN_TEST(test_pagerank_non_convergence); result |= RUN_TEST(test_personalized_pagerank); result |= RUN_TEST(test_personalized_pagerank_non_convergence); return result; }
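/*
 * A hedged aside: an unweighted host-side power-iteration sketch of the kind
 * that could regenerate expected values such as those in test_pagerank_4
 * (unit edge weights).  pagerank_reference is hypothetical, not a cugraph
 * API; it redistributes dangling-vertex mass uniformly, a common convention
 * that is assumed -- not confirmed -- to match the library.  The weighted
 * tests above would additionally need weight-normalized transitions.
 */
#include <stddef.h>
#include <stdint.h>

static void pagerank_reference(const int32_t* src, const int32_t* dst, size_t m,
                               size_t n, double alpha, size_t iters, double* pr)
{
  double next[n];
  int32_t out_deg[n];
  for (size_t v = 0; v < n; ++v) { pr[v] = 1.0 / (double)n; out_deg[v] = 0; }
  for (size_t e = 0; e < m; ++e) ++out_deg[src[e]];
  for (size_t it = 0; it < iters; ++it) {
    double dangling = 0.0; /* rank mass sitting on vertices with no out-edges */
    for (size_t v = 0; v < n; ++v) {
      next[v] = 0.0;
      if (out_deg[v] == 0) dangling += pr[v];
    }
    for (size_t e = 0; e < m; ++e)
      next[dst[e]] += pr[src[e]] / (double)out_deg[src[e]];
    for (size_t v = 0; v < n; ++v)
      next[v] = (1.0 - alpha) / (double)n + alpha * (next[v] + dangling / (double)n);
    for (size_t v = 0; v < n; ++v) pr[v] = next[v];
  }
}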
24571
41.958042
100
c
cugraph-branch-23.08/cpp/tests/c_api/sg_random_walks_test.c
cugraph-branch-23.08/cpp/tests/c_api/sg_random_walks_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_uniform_random_walks_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, size_t num_vertices, size_t num_edges, vertex_t* h_start, size_t num_starts, size_t max_depth, bool_t renumber, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error = NULL; cugraph_resource_handle_t* handle = NULL; cugraph_graph_t* graph = NULL; cugraph_random_walk_result_t* result = NULL; cugraph_type_erased_device_array_t* d_start = NULL; cugraph_type_erased_device_array_view_t* d_start_view = NULL; handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( handle, h_src, h_dst, h_wgt, num_edges, store_transposed, renumber, FALSE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed."); d_start_view = cugraph_type_erased_device_array_view(d_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_view, (byte_t*)h_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed."); ret_code = cugraph_uniform_random_walks(handle, graph, d_start_view, max_depth, &result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "uniform_random_walks failed."); cugraph_type_erased_device_array_view_t* verts; cugraph_type_erased_device_array_view_t* wgts; verts = cugraph_random_walk_result_get_paths(result); wgts = cugraph_random_walk_result_get_weights(result); size_t verts_size = cugraph_type_erased_device_array_view_size(verts); size_t wgts_size = cugraph_type_erased_device_array_view_size(wgts); vertex_t h_result_verts[verts_size]; weight_t h_result_wgts[wgts_size]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_result_verts, verts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_result_wgts, wgts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); // NOTE: The C++ tester does a more thorough validation. 
For our purposes // here we will do a simpler validation, merely checking that all edges // are actually part of the graph size_t unrenumbered_vertex_size = num_vertices; for (size_t i = 0 ; i < num_edges ; ++i) { if (h_src[i] > unrenumbered_vertex_size) unrenumbered_vertex_size = h_src[i]; if (h_dst[i] > unrenumbered_vertex_size) unrenumbered_vertex_size = h_dst[i]; } ++unrenumbered_vertex_size; weight_t M[unrenumbered_vertex_size][unrenumbered_vertex_size]; for (int i = 0; i < unrenumbered_vertex_size; ++i) for (int j = 0; j < unrenumbered_vertex_size; ++j) M[i][j] = -1; for (int i = 0; i < num_edges; ++i) M[h_src[i]][h_dst[i]] = h_wgt[i]; TEST_ASSERT(test_ret_value, cugraph_random_walk_result_get_max_path_length(result) == max_depth, "path length does not match"); for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) { TEST_ASSERT( test_ret_value, h_start[i] == h_result_verts[i * (max_depth + 1)], "start of path not found"); for (size_t j = 0; j < max_depth; ++j) { int src_index = i * (max_depth + 1) + j; int dst_index = src_index + 1; if (h_result_verts[dst_index] < 0) { if (h_result_verts[src_index] >= 0) { int departing_count = 0; for (int k = 0; k < num_vertices; ++k) { if (M[h_result_verts[src_index]][k] >= 0) departing_count++; } TEST_ASSERT(test_ret_value, departing_count == 0, "uniform_random_walks found no edge when an edge exists"); } } else { TEST_ASSERT( test_ret_value, M[h_result_verts[src_index]][h_result_verts[dst_index]] == h_result_wgts[i * max_depth + j], "uniform_random_walks got edge that doesn't exist"); } } } cugraph_random_walk_result_free(result); cugraph_sg_graph_free(graph); cugraph_free_resource_handle(handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_biased_random_walks_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, size_t num_vertices, size_t num_edges, vertex_t* h_start, size_t num_starts, size_t max_depth, bool_t renumber, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error = NULL; cugraph_resource_handle_t* handle = NULL; cugraph_graph_t* graph = NULL; cugraph_random_walk_result_t* result = NULL; cugraph_type_erased_device_array_t* d_start = NULL; cugraph_type_erased_device_array_view_t* d_start_view = NULL; handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( handle, h_src, h_dst, h_wgt, num_edges, store_transposed, renumber, FALSE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed."); d_start_view = cugraph_type_erased_device_array_view(d_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_view, (byte_t*)h_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed."); ret_code = cugraph_biased_random_walks(handle, graph, d_start_view, max_depth, &result, &ret_error); #if 1 TEST_ASSERT(test_ret_value, ret_code != CUGRAPH_SUCCESS, "biased_random_walks should have failed") #else TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "biased_random_walks failed."); cugraph_type_erased_device_array_view_t* verts; 
cugraph_type_erased_device_array_view_t* wgts; verts = cugraph_random_walk_result_get_paths(result); wgts = cugraph_random_walk_result_get_weights(result); size_t verts_size = cugraph_type_erased_device_array_view_size(verts); size_t wgts_size = cugraph_type_erased_device_array_view_size(wgts); vertex_t h_result_verts[verts_size]; weight_t h_result_wgts[wgts_size]; ret_code = cugraph_type_erased_device_array_view_copy_to_host(handle, (byte_t*)h_result_verts, verts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_result_wgts, wgts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); // NOTE: The C++ tester does a more thorough validation. For our purposes // here we will do a simpler validation, merely checking that all edges // are actually part of the graph weight_t M[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) M[i][j] = -1; for (int i = 0; i < num_edges; ++i) M[h_src[i]][h_dst[i]] = h_wgt[i]; TEST_ASSERT(test_ret_value, cugraph_random_walk_result_get_max_path_length(result) == max_depth, "path length does not match"); for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, M[h_start[i]][h_result_verts[i * (max_depth + 1)]] == h_result_wgts[i * max_depth], "biased_random_walks got edge that doesn't exist"); for (size_t j = 1; j < cugraph_random_walk_result_get_max_path_length(result); ++j) TEST_ASSERT( test_ret_value, M[h_result_verts[i * (max_depth + 1) + j - 1]][h_result_verts[i * (max_depth + 1) + j]] == h_result_wgts[i * max_depth + j - 1], "biased_random_walks got edge that doesn't exist"); } cugraph_random_walk_result_free(result); #endif cugraph_sg_graph_free(graph); cugraph_free_resource_handle(handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_node2vec_random_walks_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, size_t num_vertices, size_t num_edges, vertex_t* h_start, size_t num_starts, size_t max_depth, weight_t p, weight_t q, bool_t renumber, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error = NULL; cugraph_resource_handle_t* handle = NULL; cugraph_graph_t* graph = NULL; cugraph_random_walk_result_t* result = NULL; cugraph_type_erased_device_array_t* d_start = NULL; cugraph_type_erased_device_array_view_t* d_start_view = NULL; handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( handle, h_src, h_dst, h_wgt, num_edges, store_transposed, renumber, FALSE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed."); d_start_view = cugraph_type_erased_device_array_view(d_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_view, (byte_t*)h_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed."); ret_code = cugraph_node2vec_random_walks( handle, graph, d_start_view, max_depth, p, q, &result, &ret_error); #if 1 TEST_ASSERT( test_ret_value, ret_code != CUGRAPH_SUCCESS, "node2vec_random_walks should have failed") #else
TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "node2vec_random_walks failed."); cugraph_type_erased_device_array_view_t* verts; cugraph_type_erased_device_array_view_t* wgts; verts = cugraph_random_walk_result_get_paths(result); wgts = cugraph_random_walk_result_get_weights(result); size_t verts_size = cugraph_type_erased_device_array_view_size(verts); size_t wgts_size = cugraph_type_erased_device_array_view_size(wgts); vertex_t h_result_verts[verts_size]; weight_t h_result_wgts[wgts_size]; ret_code = cugraph_type_erased_device_array_view_copy_to_host(handle, (byte_t*)h_result_verts, verts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_result_wgts, wgts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); // NOTE: The C++ tester does a more thorough validation. For our purposes // here we will do a simpler validation, merely checking that all edges // are actually part of the graph weight_t M[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) M[i][j] = -1; for (int i = 0; i < num_edges; ++i) M[h_src[i]][h_dst[i]] = h_wgt[i]; TEST_ASSERT(test_ret_value, cugraph_random_walk_result_get_max_path_length(result) == max_depth, "path length does not match"); for (int i = 0; (i < num_starts) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, M[h_start[i]][h_result_verts[i * (max_depth + 1)]] == h_result_wgts[i * max_depth], "node2vec_random_walks got edge that doesn't exist"); for (size_t j = 1; j < max_depth; ++j) TEST_ASSERT( test_ret_value, M[h_result_verts[i * (max_depth + 1) + j - 1]][h_result_verts[i * (max_depth + 1) + j]] == h_result_wgts[i * max_depth + j - 1], "node2vec_random_walks got edge that doesn't exist"); } cugraph_random_walk_result_free(result); #endif cugraph_sg_graph_free(graph); cugraph_free_resource_handle(handle); cugraph_error_free(ret_error); return test_ret_value; } int test_uniform_random_walks() { size_t num_edges = 8; size_t num_vertices = 6; size_t num_starts = 2; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t wgt[] = {0, 1, 2, 3, 4, 5, 6, 7}; vertex_t start[] = {2, 2}; return generic_uniform_random_walks_test( src, dst, wgt, num_vertices, num_edges, start, num_starts, 3, FALSE, FALSE); } int test_biased_random_walks() { size_t num_edges = 8; size_t num_vertices = 6; size_t num_starts = 2; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t wgt[] = {0, 1, 2, 3, 4, 5, 6, 7}; vertex_t start[] = {2, 2}; return generic_biased_random_walks_test( src, dst, wgt, num_vertices, num_edges, start, num_starts, 3, FALSE, FALSE); } int test_node2vec_random_walks() { size_t num_edges = 8; size_t num_vertices = 6; size_t num_starts = 2; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; weight_t wgt[] = {0, 1, 2, 3, 4, 5, 6, 7}; vertex_t start[] = {2, 2}; weight_t p = 5; weight_t q = 9; return generic_node2vec_random_walks_test( src, dst, wgt, num_vertices, num_edges, start, num_starts, 3, p, q, FALSE, FALSE); } int test_uniform_random_walks_oob() { size_t num_edges = 5; size_t num_vertices = 6; size_t num_starts = 4; size_t max_depth = 7; vertex_t src[] = {1, 2, 4, 7, 3}; vertex_t dst[] = {5, 4, 1, 5, 2}; weight_t wgt[] = {0.4, 0.5, 0.6,
0.7, 0.8}; vertex_t start[] = {2, 5, 3, 1}; return generic_uniform_random_walks_test( src, dst, wgt, num_vertices, num_edges, start, num_starts, max_depth, TRUE, FALSE); } int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_uniform_random_walks); result |= RUN_TEST(test_biased_random_walks); result |= RUN_TEST(test_node2vec_random_walks); result |= RUN_TEST(test_uniform_random_walks_oob); return result; }
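/*
 * A hedged aside: instead of the dense weight matrix built in the validators
 * above, a single padded walk can be checked directly against the edge list.
 * walk_is_valid is a hypothetical helper, not a cugraph API; it assumes the
 * result layout used by these tests (each walk occupies max_depth + 1 vertex
 * slots, padded with a negative id once the walk terminates).
 */
#include <stddef.h>
#include <stdint.h>

static int walk_is_valid(const int32_t* src, const int32_t* dst, size_t m,
                         const int32_t* walk, size_t max_depth)
{
  for (size_t j = 0; j < max_depth; ++j) {
    if (walk[j + 1] < 0) return 1; /* walk ended early; remaining slots are padding */
    int found = 0;
    for (size_t e = 0; e < m; ++e)
      if (src[e] == walk[j] && dst[e] == walk[j + 1]) { found = 1; break; }
    if (!found) return 0; /* step uses an edge that is not in the graph */
  }
  return 1;
}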
16722
36.749436
100
c
cugraph-branch-23.08/cpp/tests/c_api/similarity_test.c
cugraph-branch-23.08/cpp/tests/c_api/similarity_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/array.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; typedef enum { JACCARD, SORENSEN, OVERLAP } similarity_t; int generic_similarity_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_first, vertex_t* h_second, weight_t* h_result, size_t num_vertices, size_t num_edges, size_t num_pairs, bool_t store_transposed, bool_t use_weight, similarity_t test_type) { int test_ret_value = 0; data_type_id_t vertex_tid = INT32; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* handle = NULL; cugraph_graph_t* graph = NULL; cugraph_similarity_result_t* result = NULL; cugraph_vertex_pairs_t* vertex_pairs = NULL; cugraph_type_erased_device_array_t* v1 = NULL; cugraph_type_erased_device_array_t* v2 = NULL; cugraph_type_erased_device_array_view_t* v1_view = NULL; cugraph_type_erased_device_array_view_t* v2_view = NULL; handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, TRUE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_type_erased_device_array_create(handle, num_pairs, vertex_tid, &v1, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "v1 create failed."); ret_code = cugraph_type_erased_device_array_create(handle, num_pairs, vertex_tid, &v2, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "v2 create failed."); v1_view = cugraph_type_erased_device_array_view(v1); v2_view = cugraph_type_erased_device_array_view(v2); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, v1_view, (byte_t*)h_first, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "h_first copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, v2_view, (byte_t*)h_second, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "h_second copy_from_host failed."); ret_code = cugraph_create_vertex_pairs(handle, graph, v1_view, v2_view, &vertex_pairs, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create vertex pairs failed."); switch (test_type) { case JACCARD: ret_code = cugraph_jaccard_coefficients( handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error); break; case SORENSEN: ret_code = cugraph_sorensen_coefficients( handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error); break; case OVERLAP: ret_code = cugraph_overlap_coefficients( handle, graph, vertex_pairs, use_weight, FALSE, &result, &ret_error); 
break; } TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph similarity failed."); cugraph_type_erased_device_array_view_t* similarity_coefficient; similarity_coefficient = cugraph_similarity_result_get_similarity(result); weight_t h_similarity_coefficient[num_pairs]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_similarity_coefficient, similarity_coefficient, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_pairs) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(h_similarity_coefficient[i], h_result[i], 0.001), "similarity results don't match"); } if (result != NULL) cugraph_similarity_result_free(result); if (vertex_pairs != NULL) cugraph_vertex_pairs_free(vertex_pairs); cugraph_sg_graph_free(graph); cugraph_free_resource_handle(handle); cugraph_error_free(ret_error); return test_ret_value; } int test_jaccard() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0.2, 0.666667, 0.333333, 0.4, 0.166667, 0.5, 0.2, 0.25, 0.25, 0.666667}; return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices, num_edges, num_pairs, FALSE, FALSE, JACCARD); } int test_weighted_jaccard() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // TODO: Fill in return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices, num_edges, num_pairs, FALSE, TRUE, JACCARD); } int test_sorensen() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0.333333, 0.8, 0.5, 0.571429, 0.285714, 0.666667, 0.333333, 0.4, 0.4, 0.8}; return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices, num_edges, num_pairs, FALSE, FALSE, SORENSEN); } int test_weighted_sorensen() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // TODO: Fill in return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices,
num_edges, num_pairs, FALSE, TRUE, SORENSEN); } int test_overlap() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0.5, 1, 0.5, 0.666667, 0.333333, 1, 0.333333, 0.5, 0.5, 1}; return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices, num_edges, num_pairs, FALSE, FALSE, OVERLAP); } int test_weighted_overlap() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_pairs = 10; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_first[] = {0, 0, 0, 1, 1, 1, 2, 2, 2, 3}; vertex_t h_second[] = {1, 3, 4, 2, 3, 5, 3, 4, 5, 4}; weight_t h_result[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; // TODO: Fill in return generic_similarity_test(h_src, h_dst, h_wgt, h_first, h_second, h_result, num_vertices, num_edges, num_pairs, FALSE, TRUE, OVERLAP); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_jaccard); result |= RUN_TEST(test_sorensen); result |= RUN_TEST(test_overlap); // result |= RUN_TEST(test_weighted_jaccard); // result |= RUN_TEST(test_weighted_sorensen); // result |= RUN_TEST(test_weighted_overlap); return result; }
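/*
 * A hedged aside: a host-side sketch of the unweighted Jaccard coefficient
 * behind h_result in test_jaccard above (e.g. pair (0, 1): |{2}| /
 * |{0, 1, 2, 3, 4}| = 0.2).  jaccard_reference is a hypothetical helper, not
 * a cugraph API; neighborhoods come from the directed edge list, which these
 * tests make symmetric by listing each undirected edge in both directions.
 */
#include <stddef.h>
#include <stdint.h>

static double jaccard_reference(const int32_t* src, const int32_t* dst, size_t m,
                                size_t n, int32_t a, int32_t b)
{
  int na[n], nb[n]; /* membership flags for the neighborhoods of a and b */
  for (size_t v = 0; v < n; ++v) { na[v] = 0; nb[v] = 0; }
  for (size_t e = 0; e < m; ++e) {
    if (src[e] == a) na[dst[e]] = 1;
    if (src[e] == b) nb[dst[e]] = 1;
  }
  int inter = 0, uni = 0;
  for (size_t v = 0; v < n; ++v) {
    inter += (na[v] && nb[v]);
    uni += (na[v] || nb[v]);
  }
  return (uni == 0) ? 0.0 : (double)inter / (double)uni;
}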
11902
37.521036
100
c
cugraph-branch-23.08/cpp/tests/c_api/sssp_test.c
cugraph-branch-23.08/cpp/tests/c_api/sssp_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <float.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; const float EPSILON = 0.001; int generic_sssp_test(vertex_t* h_src, vertex_t* h_dst, float* h_wgt, vertex_t source, float const* expected_distances, vertex_t const* expected_predecessors, size_t num_vertices, size_t num_edges, float cutoff, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_paths_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); ret_code = cugraph_sssp(p_handle, p_graph, source, cutoff, TRUE, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_sssp failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* distances; cugraph_type_erased_device_array_view_t* predecessors; vertices = cugraph_paths_result_get_vertices(p_result); distances = cugraph_paths_result_get_distances(p_result); predecessors = cugraph_paths_result_get_predecessors(p_result); vertex_t h_vertices[num_vertices]; float h_distances[num_vertices]; vertex_t h_predecessors[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_distances, distances, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_predecessors, predecessors, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqual(expected_distances[h_vertices[i]], h_distances[i], EPSILON), "sssp distances don't match"); TEST_ASSERT(test_ret_value, expected_predecessors[h_vertices[i]] == h_predecessors[i], "sssp predecessors don't match"); } cugraph_type_erased_device_array_view_free(vertices); cugraph_type_erased_device_array_view_free(distances); cugraph_type_erased_device_array_view_free(predecessors); cugraph_paths_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int generic_sssp_test_double(vertex_t* h_src, vertex_t* h_dst, double* h_wgt, vertex_t source, double const* expected_distances, vertex_t const* 
expected_predecessors, size_t num_vertices, size_t num_edges, double cutoff, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_paths_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph_double( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); ret_code = cugraph_sssp(p_handle, p_graph, source, cutoff, TRUE, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_sssp failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* distances; cugraph_type_erased_device_array_view_t* predecessors; vertices = cugraph_paths_result_get_vertices(p_result); distances = cugraph_paths_result_get_distances(p_result); predecessors = cugraph_paths_result_get_predecessors(p_result); vertex_t h_vertices[num_vertices]; double h_distances[num_vertices]; vertex_t h_predecessors[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_distances, distances, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_predecessors, predecessors, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, nearlyEqualDouble(expected_distances[h_vertices[i]], h_distances[i], EPSILON), "sssp distances don't match"); TEST_ASSERT(test_ret_value, expected_predecessors[h_vertices[i]] == h_predecessors[i], "sssp predecessors don't match"); } cugraph_type_erased_device_array_view_free(vertices); cugraph_type_erased_device_array_view_free(distances); cugraph_type_erased_device_array_view_free(predecessors); cugraph_paths_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_sssp() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; float wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; float expected_distances[] = {0.0f, 0.1f, FLT_MAX, 2.2f, 1.2f, 4.4f}; vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4}; // SSSP wants store_transposed = FALSE return generic_sssp_test(src, dst, wgt, 0, expected_distances, expected_predecessors, num_vertices, num_edges, 10, FALSE); } int test_sssp_with_transpose() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; float wgt[] = {0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; float expected_distances[] = {0.0f, 0.1f, FLT_MAX, 2.2f, 1.2f, 4.4f}; vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4}; // SSSP wants store_transposed = FALSE // This call will force cugraph_sssp to transpose the graph return generic_sssp_test( src, dst, wgt, 0, expected_distances, expected_predecessors, num_vertices, num_edges, 10,
TRUE); } int test_sssp_with_transpose_double() { size_t num_edges = 8; size_t num_vertices = 6; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; double wgt[] = {0.1, 2.1, 1.1, 5.1, 3.1, 4.1, 7.2, 3.2}; double expected_distances[] = {0.0, 0.1, DBL_MAX, 2.2, 1.2, 4.4}; vertex_t expected_predecessors[] = {-1, 0, -1, 1, 1, 4}; // Plain floating constants are already double in C; the non-standard 'd' suffix is removed. // SSSP wants store_transposed = FALSE // This call will force cugraph_sssp to transpose the graph return generic_sssp_test_double( src, dst, wgt, 0, expected_distances, expected_predecessors, num_vertices, num_edges, 10, TRUE); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_sssp); result |= RUN_TEST(test_sssp_with_transpose); result |= RUN_TEST(test_sssp_with_transpose_double); return result; }
9,322
36.898374
100
c
cugraph-branch-23.08/cpp/tests/c_api/strongly_connected_components_test.c
cugraph-branch-23.08/cpp/tests/c_api/strongly_connected_components_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_scc_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_result, size_t num_vertices, size_t num_edges, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_labeling_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, FALSE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_strongly_connected_components(p_handle, p_graph, FALSE, &p_result, &ret_error); // FIXME: Actual implementation will be something like this TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_strongly_connected_components failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* components; vertices = cugraph_labeling_result_get_vertices(p_result); components = cugraph_labeling_result_get_labels(p_result); vertex_t h_vertices[num_vertices]; vertex_t h_components[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_components, components, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); vertex_t component_check[num_vertices]; for (vertex_t i = 0; i < num_vertices; ++i) { component_check[i] = num_vertices; } for (vertex_t i = 0 ; i < num_vertices; ++i) { if (component_check[h_result[i]] == num_vertices) component_check[h_result[i]] = h_components[i]; } for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, h_components[i] == component_check[h_result[i]], "component results don't match"); } cugraph_type_erased_device_array_view_free(components); cugraph_type_erased_device_array_view_free(vertices); cugraph_labeling_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_strongly_connected_components() { size_t num_edges = 19; size_t num_vertices = 12; vertex_t h_src[] = {0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 7, 7, 8, 8, 8, 9, 
10}; vertex_t h_dst[] = {1, 2, 3, 4, 0, 1, 3, 4, 5, 3, 5, 7, 9, 10, 6, 7, 9, 11, 11}; weight_t h_wgt[] = { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result[] = {0, 0, 0, 3, 3, 5, 6, 7, 8, 9, 10, 11}; // SCC wants store_transposed = FALSE return generic_scc_test(h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_strongly_connected_components); return result; }
4,478
34.267717
114
c
cugraph-branch-23.08/cpp/tests/c_api/triangle_count_test.c
cugraph-branch-23.08/cpp/tests/c_api/triangle_count_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_triangle_count_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_verts, edge_t* h_result, size_t num_vertices, size_t num_edges, size_t num_results, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_triangle_count_result_t* p_result = NULL; cugraph_type_erased_device_array_t* p_start = NULL; cugraph_type_erased_device_array_view_t* p_start_view = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, TRUE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); if (h_verts != NULL) { ret_code = cugraph_type_erased_device_array_create(p_handle, num_results, INT32, &p_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "p_start create failed."); p_start_view = cugraph_type_erased_device_array_view(p_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, p_start_view, (byte_t*)h_verts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src copy_from_host failed."); } ret_code = cugraph_triangle_count(p_handle, p_graph, p_start_view, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, "cugraph_triangle_count failed."); if (test_ret_value == 0) { cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* counts; vertices = cugraph_triangle_count_result_get_vertices(p_result); counts = cugraph_triangle_count_result_get_counts(p_result); TEST_ASSERT(test_ret_value, cugraph_type_erased_device_array_view_size(vertices) == num_results, "invalid number of results"); vertex_t num_local_results = cugraph_type_erased_device_array_view_size(vertices); vertex_t h_vertices[num_local_results]; edge_t h_counts[num_local_results]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_counts, counts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); for (int i = 0; (i < num_local_results) && (test_ret_value == 0); ++i) { TEST_ASSERT( test_ret_value, 
h_result[i] == h_counts[i], "counts results don't match"); } cugraph_triangle_count_result_free(p_result); } cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_triangle_count() { size_t num_edges = 16; size_t num_vertices = 6; size_t num_results = 4; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4}; weight_t h_wgt[] = { 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f, 0.1f, 2.1f, 1.1f, 5.1f, 3.1f, 4.1f, 7.2f, 3.2f}; vertex_t h_verts[] = {0, 1, 2, 4}; edge_t h_result[] = {1, 2, 2, 0}; // Triangle Count wants store_transposed = FALSE return generic_triangle_count_test( h_src, h_dst, h_wgt, h_verts, h_result, num_vertices, num_edges, num_results, FALSE); } int test_triangle_count_dolphins() { size_t num_edges = 318; size_t num_vertices = 62; vertex_t h_src[] = { 10, 14, 15, 40, 42, 47, 17, 19, 26, 27, 28, 36, 41, 54, 10, 42, 44, 61, 8, 14, 59, 51, 9, 13, 56, 57, 9, 13, 17, 54, 56, 57, 19, 27, 30, 40, 54, 20, 28, 37, 45, 59, 13, 17, 32, 41, 57, 29, 42, 47, 51, 33, 17, 32, 41, 54, 57, 16, 24, 33, 34, 37, 38, 40, 43, 50, 52, 18, 24, 40, 45, 55, 59, 20, 33, 37, 38, 50, 22, 25, 27, 31, 57, 20, 21, 24, 29, 45, 51, 30, 54, 28, 36, 38, 44, 47, 50, 29, 33, 37, 45, 51, 36, 45, 51, 29, 45, 51, 26, 27, 27, 30, 47, 35, 43, 45, 51, 52, 42, 47, 60, 34, 37, 38, 40, 43, 50, 37, 44, 49, 37, 39, 40, 59, 40, 43, 45, 61, 43, 44, 52, 58, 57, 52, 54, 57, 47, 50, 46, 53, 50, 51, 59, 49, 57, 51, 55, 61, 57, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 11, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 23, 23, 23, 24, 24, 24, 25, 25, 26, 28, 28, 29, 29, 29, 29, 29, 30, 30, 32, 33, 33, 33, 33, 33, 33, 34, 34, 34, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 40, 41, 41, 42, 42, 43, 43, 45, 45, 45, 46, 48, 50, 51, 53, 54}; vertex_t h_dst[] = { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 10, 10, 10, 11, 12, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 23, 23, 23, 24, 24, 24, 25, 25, 26, 28, 28, 29, 29, 29, 29, 29, 30, 30, 32, 33, 33, 33, 33, 33, 33, 34, 34, 34, 36, 36, 36, 36, 37, 37, 37, 37, 38, 38, 38, 38, 39, 40, 41, 41, 42, 42, 43, 43, 45, 45, 45, 46, 48, 50, 51, 53, 54, 10, 14, 15, 40, 42, 47, 17, 19, 26, 27, 28, 36, 41, 54, 10, 42, 44, 61, 8, 14, 59, 51, 9, 13, 56, 57, 9, 13, 17, 54, 56, 57, 19, 27, 30, 40, 54, 20, 28, 37, 45, 59, 13, 17, 32, 41, 57, 29, 42, 47, 51, 33, 17, 32, 41, 54, 57, 16, 24, 33, 34, 37, 38, 40, 43, 50, 52, 18, 24, 40, 45, 55, 59, 20, 33, 37, 38, 50, 22, 25, 27, 31, 57, 20, 21, 24, 29, 45, 51, 30, 54, 28, 36, 38, 44, 47, 50, 29, 33, 37, 45, 51, 36, 45, 51, 29, 45, 51, 26, 27, 27, 30, 47, 35, 43, 45, 51, 52, 42, 47, 60, 34, 37, 38, 40, 43, 50, 37, 44, 49, 37, 39, 40, 59, 40, 43, 45, 61, 43, 44, 52, 58, 57, 52, 54, 57, 47, 50, 46, 53, 50, 51, 59, 49, 57, 51, 55, 61, 57}; weight_t h_wgt[] = { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; vertex_t h_verts[] = {11, 48, 0}; edge_t h_result[] = {0, 0, 5}; size_t num_results = 3; // Triangle Count wants store_transposed = FALSE return generic_triangle_count_test( h_src, h_dst, h_wgt, h_verts, h_result, num_vertices, num_edges, num_results, FALSE); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_triangle_count); result |= RUN_TEST(test_triangle_count_dolphins); return result; }
10,334
48.6875
100
c
cugraph-branch-23.08/cpp/tests/c_api/two_hop_neighbors_test.c
cugraph-branch-23.08/cpp/tests/c_api/two_hop_neighbors_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/array.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_two_hop_nbr_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_sources, vertex_t* h_result_v1, vertex_t* h_result_v2, size_t num_vertices, size_t num_edges, size_t num_sources, size_t num_result_pairs, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* resource_handle = NULL; cugraph_graph_t* graph = NULL; cugraph_type_erased_device_array_t* start_vertices = NULL; cugraph_type_erased_device_array_view_t* start_vertices_view = NULL; cugraph_vertex_pairs_t* result = NULL; resource_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, resource_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph(resource_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, TRUE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ALWAYS_ASSERT(ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); if (num_sources > 0) { ret_code = cugraph_type_erased_device_array_create( resource_handle, num_sources, INT32, &start_vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "p_sources create failed."); start_vertices_view = cugraph_type_erased_device_array_view(start_vertices); ret_code = cugraph_type_erased_device_array_view_copy_from_host( resource_handle, start_vertices_view, (byte_t*)h_sources, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src copy_from_host failed."); } ret_code = cugraph_two_hop_neighbors( resource_handle, graph, start_vertices_view, FALSE, &result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_two_hop_neighbors failed."); cugraph_type_erased_device_array_view_t* v1; cugraph_type_erased_device_array_view_t* v2; v1 = cugraph_vertex_pairs_get_first(result); v2 = cugraph_vertex_pairs_get_second(result); size_t number_of_pairs = cugraph_type_erased_device_array_view_size(v1); vertex_t h_v1[number_of_pairs]; vertex_t h_v2[number_of_pairs]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( resource_handle, (byte_t*)h_v1, v1, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( resource_handle, (byte_t*)h_v2, v2, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); bool_t M[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) M[i][j] = FALSE; for (int i = 0; i < num_result_pairs; ++i) M[h_result_v1[i]][h_result_v2[i]] = TRUE; TEST_ASSERT(test_ret_value, number_of_pairs == num_result_pairs, "results are different sizes"); for (int i = 0; (i <
number_of_pairs) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, M[h_v1[i]][h_v2[i]], "result not found"); } cugraph_vertex_pairs_free(result); cugraph_type_erased_device_array_view_free(start_vertices_view); cugraph_type_erased_device_array_free(start_vertices); cugraph_sg_graph_free(graph); cugraph_free_resource_handle(resource_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_two_hop_nbr_all() { size_t num_edges = 22; size_t num_vertices = 7; size_t num_sources = 0; size_t num_result_pairs = 43; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5, 3, 1, 4, 5, 5, 6}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 3, 1, 6, 5}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result_v1[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6}; vertex_t h_result_v2[] = {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 1, 3, 4, 6}; return generic_two_hop_nbr_test(h_src, h_dst, h_wgt, NULL, h_result_v1, h_result_v2, num_vertices, num_edges, num_sources, num_result_pairs, FALSE); } int test_two_hop_nbr_one() { size_t num_edges = 22; size_t num_vertices = 7; size_t num_sources = 1; size_t num_result_pairs = 6; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 1, 3, 4, 0, 1, 3, 5, 5, 3, 1, 4, 5, 5, 6}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 3, 1, 6, 5}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_sources[] = {0}; vertex_t h_result_v1[] = {0, 0, 0, 0, 0, 0}; vertex_t h_result_v2[] = {0, 1, 2, 3, 4, 5}; return generic_two_hop_nbr_test(h_src, h_dst, h_wgt, h_sources, h_result_v1, h_result_v2, num_vertices, num_edges, num_sources, num_result_pairs, FALSE); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_two_hop_nbr_all); result |= RUN_TEST(test_two_hop_nbr_one); return result; }
7,528
37.413265
98
c
cugraph-branch-23.08/cpp/tests/c_api/uniform_neighbor_sample_test.c
cugraph-branch-23.08/cpp/tests/c_api/uniform_neighbor_sample_test.c
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int create_test_graph_with_edge_ids(const cugraph_resource_handle_t* p_handle, vertex_t* h_src, vertex_t* h_dst, edge_t* h_ids, size_t num_edges, bool_t store_transposed, bool_t renumber, bool_t is_symmetric, cugraph_graph_t** p_graph, cugraph_error_t** ret_error) { int test_ret_value = 0; cugraph_error_code_t ret_code; cugraph_graph_properties_t properties; properties.is_symmetric = is_symmetric; properties.is_multigraph = FALSE; data_type_id_t vertex_tid = INT32; data_type_id_t edge_tid = INT32; data_type_id_t weight_tid = FLOAT32; cugraph_type_erased_device_array_t* src; cugraph_type_erased_device_array_t* dst; cugraph_type_erased_device_array_t* ids; cugraph_type_erased_device_array_view_t* src_view; cugraph_type_erased_device_array_view_t* dst_view; cugraph_type_erased_device_array_view_t* ids_view; cugraph_type_erased_device_array_view_t* wgt_view; ret_code = cugraph_type_erased_device_array_create(p_handle, num_edges, vertex_tid, &src, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src create failed."); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(*ret_error)); ret_code = cugraph_type_erased_device_array_create(p_handle, num_edges, vertex_tid, &dst, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "dst create failed."); ret_code = cugraph_type_erased_device_array_create(p_handle, num_edges, edge_tid, &ids, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "ids create failed."); src_view = cugraph_type_erased_device_array_view(src); dst_view = cugraph_type_erased_device_array_view(dst); ids_view = cugraph_type_erased_device_array_view(ids); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, src_view, (byte_t*)h_src, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "src copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, dst_view, (byte_t*)h_dst, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "dst copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_from_host( p_handle, ids_view, (byte_t*)h_ids, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "wgt copy_from_host failed."); ret_code = cugraph_type_erased_device_array_view_as_type(ids, weight_tid, &wgt_view, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "wgt cast from ids failed."); ret_code = cugraph_sg_graph_create(p_handle, &properties, src_view, dst_view, wgt_view, NULL, NULL, store_transposed, renumber, FALSE, p_graph, ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); cugraph_type_erased_device_array_view_free(wgt_view); 
cugraph_type_erased_device_array_view_free(ids_view); cugraph_type_erased_device_array_view_free(dst_view); cugraph_type_erased_device_array_view_free(src_view); cugraph_type_erased_device_array_free(ids); cugraph_type_erased_device_array_free(dst); cugraph_type_erased_device_array_free(src); return test_ret_value; } int test_uniform_neighbor_sample_with_properties(const cugraph_resource_handle_t* handle) { data_type_id_t vertex_tid = INT32; data_type_id_t edge_tid = INT32; data_type_id_t weight_tid = FLOAT32; data_type_id_t edge_id_tid = INT32; data_type_id_t edge_type_tid = INT32; size_t num_edges = 8; size_t num_vertices = 6; size_t fan_out_size = 1; size_t num_starts = 1; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; edge_t edge_ids[] = {0, 1, 2, 3, 4, 5, 6, 7}; weight_t weight[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}; int32_t edge_types[] = {7, 6, 5, 4, 3, 2, 1, 0}; vertex_t start[] = {2}; int fan_out[] = {-1}; // Create graph int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error = NULL; cugraph_graph_t* graph = NULL; cugraph_sample_result_t* result = NULL; ret_code = create_sg_test_graph(handle, vertex_tid, edge_tid, src, dst, weight_tid, weight, edge_type_tid, edge_types, edge_id_tid, edge_ids, num_edges, FALSE, TRUE, FALSE, FALSE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); cugraph_type_erased_device_array_t* d_start = NULL; cugraph_type_erased_device_array_view_t* d_start_view = NULL; cugraph_type_erased_host_array_view_t* h_fan_out_view = NULL; ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed."); d_start_view = cugraph_type_erased_device_array_view(d_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_view, (byte_t*)start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start copy_from_host failed."); h_fan_out_view = cugraph_type_erased_host_array_view_create(fan_out, 1, INT32); cugraph_rng_state_t *rng_state; ret_code = cugraph_rng_state_create(handle, 0, &rng_state, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "rng_state create failed."); ret_code = cugraph_uniform_neighbor_sample_with_edge_properties(handle, graph, d_start_view, NULL, NULL, NULL, h_fan_out_view, rng_state, FALSE, TRUE, FALSE, &result, &ret_error); #ifdef NO_CUGRAPH_OPS TEST_ASSERT( test_ret_value, ret_code != CUGRAPH_SUCCESS, "uniform_neighbor_sample should have failed") #else TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "uniform_neighbor_sample failed."); cugraph_type_erased_device_array_view_t* result_srcs; cugraph_type_erased_device_array_view_t* result_dsts; cugraph_type_erased_device_array_view_t* result_edge_id; cugraph_type_erased_device_array_view_t* result_weights; cugraph_type_erased_device_array_view_t* result_edge_types; cugraph_type_erased_device_array_view_t* result_hops; result_srcs = cugraph_sample_result_get_sources(result); result_dsts = cugraph_sample_result_get_destinations(result); result_edge_id = cugraph_sample_result_get_edge_id(result); result_weights = cugraph_sample_result_get_edge_weight(result); result_edge_types = cugraph_sample_result_get_edge_type(result); result_hops = cugraph_sample_result_get_hop(result); size_t 
result_size = cugraph_type_erased_device_array_view_size(result_srcs); vertex_t h_srcs[result_size]; vertex_t h_dsts[result_size]; edge_t h_edge_id[result_size]; weight_t h_weight[result_size]; int32_t h_edge_types[result_size]; int32_t h_hops[result_size]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_srcs, result_srcs, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_dsts, result_dsts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_edge_id, result_edge_id, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_weight, result_weights, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_edge_types, result_edge_types, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_hops, result_hops, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); // NOTE: The C++ tester does a more thorough validation. For our purposes // here we will do a simpler validation, merely checking that all edges // are actually part of the graph weight_t M_w[num_vertices][num_vertices]; edge_t M_edge_id[num_vertices][num_vertices]; int32_t M_edge_type[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) { M_w[i][j] = 0.0; M_edge_id[i][j] = -1; M_edge_type[i][j] = -1; } for (int i = 0; i < num_edges; ++i) { M_w[src[i]][dst[i]] = weight[i]; M_edge_id[src[i]][dst[i]] = edge_ids[i]; M_edge_type[src[i]][dst[i]] = edge_types[i]; } for (int i = 0; (i < result_size) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, M_w[h_srcs[i]][h_dsts[i]] == h_weight[i], "uniform_neighbor_sample got edge that doesn't exist"); TEST_ASSERT(test_ret_value, M_edge_id[h_srcs[i]][h_dsts[i]] == h_edge_id[i], "uniform_neighbor_sample got edge that doesn't exist"); TEST_ASSERT(test_ret_value, M_edge_type[h_srcs[i]][h_dsts[i]] == h_edge_types[i], "uniform_neighbor_sample got edge that doesn't exist"); } cugraph_sample_result_free(result); #endif cugraph_sg_graph_free(graph); cugraph_error_free(ret_error); return test_ret_value; } int test_uniform_neighbor_sample_with_labels(const cugraph_resource_handle_t* handle) { data_type_id_t vertex_tid = INT32; data_type_id_t edge_tid = INT32; data_type_id_t weight_tid = FLOAT32; data_type_id_t edge_id_tid = INT32; data_type_id_t edge_type_tid = INT32; size_t num_edges = 8; size_t num_vertices = 6; size_t fan_out_size = 1; size_t num_starts = 2; vertex_t src[] = {0, 1, 1, 2, 2, 2, 3, 4}; vertex_t dst[] = {1, 3, 4, 0, 1, 3, 5, 5}; edge_t edge_ids[] = {0, 1, 2, 3, 4, 5, 6, 7}; weight_t weight[] = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}; int32_t edge_types[] = {7, 6, 5, 4, 3, 2, 1, 0}; vertex_t start[] = {2, 3}; size_t start_labels[] = { 6, 12 }; int fan_out[] = {-1}; // Create graph int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error = NULL; cugraph_graph_t* graph = NULL; cugraph_sample_result_t* result = NULL; ret_code =
create_sg_test_graph(handle, vertex_tid, edge_tid, src, dst, weight_tid, weight, edge_type_tid, edge_types, edge_id_tid, edge_ids, num_edges, FALSE, TRUE, FALSE, FALSE, &graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "graph creation failed."); cugraph_type_erased_device_array_t* d_start = NULL; cugraph_type_erased_device_array_view_t* d_start_view = NULL; cugraph_type_erased_device_array_t* d_start_labels = NULL; cugraph_type_erased_device_array_view_t* d_start_labels_view = NULL; cugraph_type_erased_host_array_view_t* h_fan_out_view = NULL; ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start create failed."); d_start_view = cugraph_type_erased_device_array_view(d_start); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_view, (byte_t*)start, &ret_error); ret_code = cugraph_type_erased_device_array_create(handle, num_starts, INT32, &d_start_labels, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "d_start_labels create failed."); d_start_labels_view = cugraph_type_erased_device_array_view(d_start_labels); ret_code = cugraph_type_erased_device_array_view_copy_from_host( handle, d_start_labels_view, (byte_t*)start_labels, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "start_labels copy_from_host failed."); h_fan_out_view = cugraph_type_erased_host_array_view_create(fan_out, 1, INT32); cugraph_rng_state_t *rng_state; ret_code = cugraph_rng_state_create(handle, 0, &rng_state, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "rng_state create failed."); ret_code = cugraph_uniform_neighbor_sample_with_edge_properties(handle, graph, d_start_view, d_start_labels_view, NULL, NULL, h_fan_out_view, rng_state, FALSE, TRUE, FALSE, &result, &ret_error); #ifdef NO_CUGRAPH_OPS TEST_ASSERT( test_ret_value, ret_code != CUGRAPH_SUCCESS, "uniform_neighbor_sample should have failed") #else TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "uniform_neighbor_sample failed."); cugraph_type_erased_device_array_view_t* result_srcs; cugraph_type_erased_device_array_view_t* result_dsts; cugraph_type_erased_device_array_view_t* result_edge_id; cugraph_type_erased_device_array_view_t* result_weights; cugraph_type_erased_device_array_view_t* result_edge_types; cugraph_type_erased_device_array_view_t* result_hops; cugraph_type_erased_device_array_view_t* result_offsets; result_srcs = cugraph_sample_result_get_sources(result); result_dsts = cugraph_sample_result_get_destinations(result); result_edge_id = cugraph_sample_result_get_edge_id(result); result_weights = cugraph_sample_result_get_edge_weight(result); result_edge_types = cugraph_sample_result_get_edge_type(result); result_hops = cugraph_sample_result_get_hop(result); result_offsets = cugraph_sample_result_get_offsets(result); size_t result_size = cugraph_type_erased_device_array_view_size(result_srcs); size_t result_offsets_size = cugraph_type_erased_device_array_view_size(result_offsets); vertex_t h_srcs[result_size]; vertex_t h_dsts[result_size]; edge_t h_edge_id[result_size]; weight_t h_weight[result_size]; int32_t h_edge_types[result_size]; int32_t h_hops[result_size]; size_t h_result_offsets[result_offsets_size]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_srcs, result_srcs, &ret_error); 
TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_dsts, result_dsts, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_edge_id, result_edge_id, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_weight, result_weights, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_edge_types, result_edge_types, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_hops, result_hops, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( handle, (byte_t*)h_result_offsets, result_offsets, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); // NOTE: The C++ tester does a more thorough validation. For our purposes // here we will do a simpler validation, merely checking that all edges // are actually part of the graph weight_t M_w[num_vertices][num_vertices]; edge_t M_edge_id[num_vertices][num_vertices]; int32_t M_edge_type[num_vertices][num_vertices]; for (int i = 0; i < num_vertices; ++i) for (int j = 0; j < num_vertices; ++j) { M_w[i][j] = 0.0; M_edge_id[i][j] = -1; M_edge_type[i][j] = -1; } for (int i = 0; i < num_edges; ++i) { M_w[src[i]][dst[i]] = weight[i]; M_edge_id[src[i]][dst[i]] = edge_ids[i]; M_edge_type[src[i]][dst[i]] = edge_types[i]; } for (int i = 0; (i < result_size) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, M_w[h_srcs[i]][h_dsts[i]] == h_weight[i], "uniform_neighbor_sample got edge that doesn't exist"); TEST_ASSERT(test_ret_value, M_edge_id[h_srcs[i]][h_dsts[i]] == h_edge_id[i], "uniform_neighbor_sample got edge that doesn't exist"); TEST_ASSERT(test_ret_value, M_edge_type[h_srcs[i]][h_dsts[i]] == h_edge_types[i], "uniform_neighbor_sample got edge that doesn't exist"); } cugraph_sample_result_free(result); #endif cugraph_sg_graph_free(graph); cugraph_error_free(ret_error); return test_ret_value; } int main(int argc, char** argv) { cugraph_resource_handle_t* handle = NULL; handle = cugraph_create_resource_handle(NULL); int result = 0; result |= RUN_TEST_NEW(test_uniform_neighbor_sample_with_properties, handle); result |= RUN_TEST_NEW(test_uniform_neighbor_sample_with_labels, handle); cugraph_free_resource_handle(handle); return result; }
21,761
42.178571
100
c
cugraph-branch-23.08/cpp/tests/c_api/weakly_connected_components_test.c
cugraph-branch-23.08/cpp/tests/c_api/weakly_connected_components_test.c
/* * Copyright (c) 2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "c_test_utils.h" /* RUN_TEST */ #include <cugraph_c/algorithms.h> #include <cugraph_c/graph.h> #include <math.h> typedef int32_t vertex_t; typedef int32_t edge_t; typedef float weight_t; int generic_wcc_test(vertex_t* h_src, vertex_t* h_dst, weight_t* h_wgt, vertex_t* h_result, size_t num_vertices, size_t num_edges, bool_t store_transposed) { int test_ret_value = 0; cugraph_error_code_t ret_code = CUGRAPH_SUCCESS; cugraph_error_t* ret_error; cugraph_resource_handle_t* p_handle = NULL; cugraph_graph_t* p_graph = NULL; cugraph_labeling_result_t* p_result = NULL; p_handle = cugraph_create_resource_handle(NULL); TEST_ASSERT(test_ret_value, p_handle != NULL, "resource handle creation failed."); ret_code = create_test_graph( p_handle, h_src, h_dst, h_wgt, num_edges, store_transposed, FALSE, TRUE, &p_graph, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "create_test_graph failed."); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); ret_code = cugraph_weakly_connected_components(p_handle, p_graph, FALSE, &p_result, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, cugraph_error_message(ret_error)); TEST_ASSERT( test_ret_value, ret_code == CUGRAPH_SUCCESS, "cugraph_weakly_connected_components failed."); cugraph_type_erased_device_array_view_t* vertices; cugraph_type_erased_device_array_view_t* components; vertices = cugraph_labeling_result_get_vertices(p_result); components = cugraph_labeling_result_get_labels(p_result); vertex_t h_vertices[num_vertices]; vertex_t h_components[num_vertices]; ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_vertices, vertices, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); ret_code = cugraph_type_erased_device_array_view_copy_to_host( p_handle, (byte_t*)h_components, components, &ret_error); TEST_ASSERT(test_ret_value, ret_code == CUGRAPH_SUCCESS, "copy_to_host failed."); vertex_t component_check[num_vertices]; for (vertex_t i = 0; i < num_vertices; ++i) { component_check[i] = num_vertices; } for (vertex_t i = 0 ; i < num_vertices; ++i) { if (component_check[h_result[h_vertices[i]]] == num_vertices) { component_check[h_result[h_vertices[i]]] = h_components[i]; } } for (int i = 0; (i < num_vertices) && (test_ret_value == 0); ++i) { TEST_ASSERT(test_ret_value, h_components[i] == component_check[h_result[h_vertices[i]]], "component results don't match"); } cugraph_type_erased_device_array_view_free(components); cugraph_type_erased_device_array_view_free(vertices); cugraph_labeling_result_free(p_result); cugraph_sg_graph_free(p_graph); cugraph_free_resource_handle(p_handle); cugraph_error_free(ret_error); return test_ret_value; } int test_weakly_connected_components() { size_t num_edges = 32; size_t num_vertices = 12; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10, 1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 
10, 6, 7, 9, 11, 11}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11, 0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}; // WCC wants store_transposed = FALSE return generic_wcc_test(h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, FALSE); } int test_weakly_connected_components_transpose() { size_t num_edges = 32; size_t num_vertices = 12; vertex_t h_src[] = {0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10, 1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11}; vertex_t h_dst[] = {1, 3, 4, 0, 1, 3, 5, 5, 7, 9, 10, 6, 7, 9, 11, 11, 0, 1, 1, 2, 2, 2, 3, 4, 6, 7, 7, 8, 8, 8, 9, 10}; weight_t h_wgt[] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0}; vertex_t h_result[] = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}; // WCC wants store_transposed = FALSE return generic_wcc_test(h_src, h_dst, h_wgt, h_result, num_vertices, num_edges, TRUE); } /******************************************************************************/ int main(int argc, char** argv) { int result = 0; result |= RUN_TEST(test_weakly_connected_components); result |= RUN_TEST(test_weakly_connected_components_transpose); return result; }
5,588
37.280822
126
c
cugraph-branch-23.08/cpp/tests/layout/legacy/knn.h
cugraph-branch-23.08/cpp/tests/layout/legacy/knn.h
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <cmath> #include <list> #include <map> #include <set> #include <vector> struct point { point() {} point(std::vector<double> vec) { attributes.assign(vec.begin(), vec.end()); } point(std::list<double>& data) : attributes(data) {} std::list<double> attributes; double distance; int index; }; struct point_compare { bool operator()(const point& x, const point& y) const { if (x.distance != y.distance) return x.distance < y.distance; return x.attributes.front() < y.attributes.front(); } }; double sq_euclid_dist(const point& x, const point& y) { double total = 0; auto i = x.attributes.begin(); auto j = y.attributes.begin(); for (; i != x.attributes.end() && j != y.attributes.end(); ++i, ++j) total += pow(*i - *j, 2); return total; } std::vector<int> knn_classify(std::list<point>& dataframe, const point& c, const int k) { std::set<point, point_compare> distances; auto i = dataframe.begin(); int index = 0; for (; i != dataframe.end(); ++i) { i->distance = sq_euclid_dist(c, *i); i->index = index++; distances.insert(*i); } std::vector<int> res; auto count = 0; auto j = distances.begin(); ++j; for (; j != distances.end() && count < k; ++j, ++count) res.push_back(j->index); return res; }
1,935
26.267606
87
h
cugraph-branch-23.08/cpp/tests/layout/legacy/trust_worthiness.h
cugraph-branch-23.08/cpp/tests/layout/legacy/trust_worthiness.h
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "knn.h" #include <algorithm> #include <iostream> #include <vector> double euclidian_dist(const std::vector<int>& x, const std::vector<int>& y) { double total = 0; auto i = x.begin(); auto j = y.begin(); for (; i != x.end() && j != y.end(); ++i, ++j) total += pow(*i, 2) - 2 * *i * *j + pow(*j, 2); return sqrt(total); } std::vector<std::vector<double>> pairwise_distances(const std::vector<std::vector<int>>& X) { std::vector<std::vector<double>> distance_matrix(X.size(), std::vector<double>(X[0].size())); for (size_t i = 0; i < X.size(); ++i) { for (size_t j = 0; j < i; ++j) { const float val = euclidian_dist(X[i], X[j]); distance_matrix[i][j] = val; distance_matrix[j][i] = val; } } return distance_matrix; } template <typename Iter, typename Compare> std::vector<int> argsort(Iter begin, Iter end, Compare comp) { std::vector<std::pair<int, Iter>> pairList; std::vector<int> ret; int i = 0; for (auto it = begin; it < end; it++) { std::pair<int, Iter> pair(i, it); pairList.push_back(pair); i++; } std::stable_sort(pairList.begin(), pairList.end(), [comp](std::pair<int, Iter> prev, std::pair<int, Iter> next) -> bool { return comp(*prev.second, *next.second); }); for (auto i : pairList) ret.push_back(i.first); return ret; } void fill_diag(std::vector<std::vector<double>>& X) { for (size_t i = 0; i < X.size(); ++i) { for (size_t j = 0; j < X[i].size(); ++j) { if (i == j) X[i][j] = INFINITY; } } } std::vector<std::vector<int>> get_knn_indices(const std::vector<std::vector<double>>& X, const int k) { std::list<point> X_list; for (size_t i = 0; i < X.size(); ++i) { point p(X[i]); X_list.push_back(p); } std::vector<std::vector<int>> ind_X_embedded; for (auto i = X_list.begin(); i != X_list.end(); ++i) { auto temp = knn_classify(X_list, *i, k); ind_X_embedded.push_back(temp); } return ind_X_embedded; } double compute_rank(const std::vector<std::vector<int>>& ind_X, std::vector<std::vector<int>>& ind_X_embedded, const int k) { const auto n = ind_X.size(); auto rank = 0; for (size_t i = 0; i < n; ++i) { std::vector<int> ranks(k, 0); for (auto j = 0; j < k; ++j) { auto it = std::find(ind_X[i].begin(), ind_X[i].end(), ind_X_embedded[i][j]); if (it != ind_X[i].end()) { auto idx = std::distance(ind_X[i].begin(), it); ranks[j] = idx; } } for (auto& val : ranks) val -= k; for (const auto& val : ranks) if (val > 0) rank += val; } return rank; } template <typename T> void print_matrix(const std::vector<std::vector<T>>& matrix) { for (size_t i = 0; i < matrix.size(); ++i) { std::cout << "[ "; for (size_t j = 0; j < matrix[i].size(); ++j) { std::cout << matrix[i][j] << ' '; } std::cout << "]\n"; } } double trustworthiness_score(const std::vector<std::vector<int>>& X, const std::vector<std::vector<double>>& Y, int n, int d, int k) { auto dist_X = pairwise_distances(X); fill_diag(dist_X); std::vector<std::vector<int>> ind_X; for (size_t i = 0; i < dist_X.size(); ++i) { auto tmp = argsort(dist_X[i].begin(), dist_X[i].end(), 
std::less<double>()); ind_X.push_back(tmp); } auto ind_X_embedded = get_knn_indices(Y, k); double t = compute_rank(ind_X, ind_X_embedded, k); t = 1.0 - t * (2.0 / (n * k * (2.0 * n - 3.0 * k - 1.0))); return t; }
4,363
27.154839
95
h
cugraph-branch-23.08/cpp/tests/traversal/legacy/bfs_ref.h
cugraph-branch-23.08/cpp/tests/traversal/legacy/bfs_ref.h
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <limits> #include <queue> #include <stack> #include <vector> template <typename VT, typename ET> void populate_neighbors(VT* indices, ET* offsets, VT w, std::vector<VT>& neighbors) { ET edge_start = offsets[w]; ET edge_end = offsets[w + 1]; neighbors.assign(indices + edge_start, indices + edge_end); } // This implements the BFS based on (Brandes, 2001) for shortest path counting template <typename VT, typename ET> void ref_bfs(VT* indices, ET* offsets, VT const number_of_vertices, std::queue<VT>& Q, std::stack<VT>& S, std::vector<VT>& dist, std::vector<std::vector<VT>>& pred, std::vector<double>& sigmas, VT source) { std::vector<VT> neighbors; pred.clear(); pred.resize(number_of_vertices); dist.assign(number_of_vertices, std::numeric_limits<VT>::max()); sigmas.assign(number_of_vertices, 0); dist[source] = 0; sigmas[source] = 1; Q.push(source); // b. Traversal while (!Q.empty()) { VT v = Q.front(); Q.pop(); S.push(v); populate_neighbors<VT, ET>(indices, offsets, v, neighbors); for (VT w : neighbors) { // Path Discovery: // Found for the first time? if (dist[w] == std::numeric_limits<VT>::max()) { dist[w] = dist[v] + 1; Q.push(w); } // Path counting // Edge(v, w) on a shortest path? if (dist[w] == dist[v] + 1) { sigmas[w] += sigmas[v]; pred[w].push_back(v); } } } }
2,166
28.283784
83
h
cugraph-branch-23.08/thirdparty/mmio/mmio.c
cugraph-branch-23.08/thirdparty/mmio/mmio.c
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "mmio.h" int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_) { FILE *f; MM_typecode matcode; int M, N, nz; int i; double *val; int *I, *J; if ((f = fopen(fname, "r")) == NULL) return -1; if (mm_read_banner(f, &matcode) != 0) { printf("mm_read_unsymmetric: Could not process Matrix Market banner "); printf(" in file [%s]\n", fname); return -1; } if ( !(mm_is_real(matcode) && mm_is_matrix(matcode) && mm_is_sparse(matcode))) { fprintf(stderr, "Sorry, this application does not support "); fprintf(stderr, "Matrix Market type: [%s]\n", mm_typecode_to_str(matcode)); return -1; } /* find out size of sparse matrix: M, N, nz .... */ if (mm_read_mtx_crd_size(f, &M, &N, &nz) !=0) { fprintf(stderr, "read_unsymmetric_sparse(): could not parse matrix size.\n"); return -1; } *M_ = M; *N_ = N; *nz_ = nz; /* reserve memory for matrices */ I = (int *) malloc(nz * sizeof(int)); J = (int *) malloc(nz * sizeof(int)); val = (double *) malloc(nz * sizeof(double)); *val_ = val; *I_ = I; *J_ = J; /* NOTE: when reading in doubles, ANSI C requires the use of the "l" */ /* specifier as in "%lg", "%lf", "%le", otherwise errors will occur */ /* (ANSI C X3.159-1989, Sec. 4.9.6.2, p. 136 lines 13-15) */ for (i=0; i<nz; i++) { fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]); I[i]--; /* adjust from 1-based to 0-based */ J[i]--; } fclose(f); return 0; } int mm_is_valid(MM_typecode matcode) { if (!mm_is_matrix(matcode)) return 0; if (mm_is_dense(matcode) && mm_is_pattern(matcode)) return 0; if (mm_is_real(matcode) && mm_is_hermitian(matcode)) return 0; if (mm_is_pattern(matcode) && (mm_is_hermitian(matcode) || mm_is_skew(matcode))) return 0; return 1; } int mm_read_banner(FILE *f, MM_typecode *matcode) { char line[MM_MAX_LINE_LENGTH]; char banner[MM_MAX_TOKEN_LENGTH]; char mtx[MM_MAX_TOKEN_LENGTH]; char crd[MM_MAX_TOKEN_LENGTH]; char data_type[MM_MAX_TOKEN_LENGTH]; char storage_scheme[MM_MAX_TOKEN_LENGTH]; char *p; mm_clear_typecode(matcode); if (fgets(line, MM_MAX_LINE_LENGTH, f) == NULL) return MM_PREMATURE_EOF; if (sscanf(line, "%s %s %s %s %s", banner, mtx, crd, data_type, storage_scheme) != 5) return MM_PREMATURE_EOF; for (p=mtx; *p!='\0'; *p=tolower(*p),p++); /* convert to lower case */ for (p=crd; *p!='\0'; *p=tolower(*p),p++); for (p=data_type; *p!='\0'; *p=tolower(*p),p++); for (p=storage_scheme; *p!='\0'; *p=tolower(*p),p++); /* check for banner */ if (strncmp(banner, MatrixMarketBanner, strlen(MatrixMarketBanner)) != 0) return MM_NO_HEADER; /* first field should be "mtx" */ if (strcmp(mtx, MM_MTX_STR) != 0) return MM_UNSUPPORTED_TYPE; mm_set_matrix(matcode); /* second field describes whether this is a sparse matrix (in coordinate storage) or a dense array */ if (strcmp(crd, MM_SPARSE_STR) == 0) mm_set_sparse(matcode); else if (strcmp(crd, MM_DENSE_STR) == 0) mm_set_dense(matcode); else return MM_UNSUPPORTED_TYPE; /* third field */ if (strcmp(data_type, MM_REAL_STR) == 0) mm_set_real(matcode); else if (strcmp(data_type, MM_COMPLEX_STR) == 0) mm_set_complex(matcode); else if (strcmp(data_type, MM_PATTERN_STR) == 0) mm_set_pattern(matcode); else if (strcmp(data_type, MM_INT_STR) == 0) mm_set_integer(matcode); else return MM_UNSUPPORTED_TYPE; /* fourth field */ if (strcmp(storage_scheme, MM_GENERAL_STR) == 0) mm_set_general(matcode); else if (strcmp(storage_scheme,
MM_SYMM_STR) == 0) mm_set_symmetric(matcode); else if (strcmp(storage_scheme, MM_HERM_STR) == 0) mm_set_hermitian(matcode); else if (strcmp(storage_scheme, MM_SKEW_STR) == 0) mm_set_skew(matcode); else return MM_UNSUPPORTED_TYPE; return 0; } int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz) { if (fprintf(f, "%d %d %d\n", M, N, nz) < 0) /* fprintf returns the number of characters written, or a negative value on error */ return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz ) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = *nz = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d %d", M, N, nz) == 3) return 0; else do { num_items_read = fscanf(f, "%d %d %d", M, N, nz); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 3); return 0; } int mm_read_mtx_array_size(FILE *f, int *M, int *N) { char line[MM_MAX_LINE_LENGTH]; int num_items_read; /* set return null parameter values, in case we exit with errors */ *M = *N = 0; /* now continue scanning until you reach the end-of-comments */ do { if (fgets(line,MM_MAX_LINE_LENGTH,f) == NULL) return MM_PREMATURE_EOF; }while (line[0] == '%'); /* line[] is either blank or has M,N, nz */ if (sscanf(line, "%d %d", M, N) == 2) return 0; else /* we have a blank line */ do { num_items_read = fscanf(f, "%d %d", M, N); if (num_items_read == EOF) return MM_PREMATURE_EOF; } while (num_items_read != 2); return 0; } int mm_write_mtx_array_size(FILE *f, int M, int N) { if (fprintf(f, "%d %d\n", M, N) < 0) return MM_COULD_NOT_WRITE_FILE; else return 0; } /*-------------------------------------------------------------------------*/ /******************************************************************/ /* use when I[], J[], and val[] are already allocated */ /******************************************************************/ int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { int i; if (mm_is_complex(matcode)) { for (i=0; i<nz; i++) if (fscanf(f, "%d %d %lg %lg", &I[i], &J[i], &val[2*i], &val[2*i+1]) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { for (i=0; i<nz; i++) { if (fscanf(f, "%d %d %lg\n", &I[i], &J[i], &val[i]) != 3) return MM_PREMATURE_EOF; } } else if (mm_is_pattern(matcode)) { for (i=0; i<nz; i++) if (fscanf(f, "%d %d", &I[i], &J[i]) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *imag, MM_typecode matcode) { if (mm_is_complex(matcode)) { if (fscanf(f, "%d %d %lg %lg", I, J, real, imag) != 4) return MM_PREMATURE_EOF; } else if (mm_is_real(matcode)) { if (fscanf(f, "%d %d %lg\n", I, J, real) != 3) return MM_PREMATURE_EOF; } else if (mm_is_pattern(matcode)) { if (fscanf(f, "%d %d", I, J) != 2) return MM_PREMATURE_EOF; } else return MM_UNSUPPORTED_TYPE; return 0; } /************************************************************************ mm_read_mtx_crd() fills M, N, nz, array of values, and return type code, e.g.
'MCRS' if matrix is complex, values[] is of size 2*nz, (nz pairs of real/imaginary values) ************************************************************************/ int mm_read_mtx_crd(char *fname, int *M, int *N, int *nz, int **I, int **J, double **val, MM_typecode *matcode) { int ret_code; FILE *f; if (strcmp(fname, "stdin") == 0) f=stdin; else if ((f = fopen(fname, "r")) == NULL) return MM_COULD_NOT_READ_FILE; if ((ret_code = mm_read_banner(f, matcode)) != 0) return ret_code; if (!(mm_is_valid(*matcode) && mm_is_sparse(*matcode) && mm_is_matrix(*matcode))) return MM_UNSUPPORTED_TYPE; if ((ret_code = mm_read_mtx_crd_size(f, M, N, nz)) != 0) return ret_code; *I = (int *) malloc(*nz * sizeof(int)); *J = (int *) malloc(*nz * sizeof(int)); *val = NULL; if (mm_is_complex(*matcode)) { *val = (double *) malloc(*nz * 2 * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_real(*matcode)) { *val = (double *) malloc(*nz * sizeof(double)); ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } else if (mm_is_pattern(*matcode)) { ret_code = mm_read_mtx_crd_data(f, *M, *N, *nz, *I, *J, *val, *matcode); if (ret_code != 0) return ret_code; } if (f != stdin) fclose(f); return 0; } int mm_write_banner(FILE *f, MM_typecode matcode) { char *str = mm_typecode_to_str(matcode); int ret_code; ret_code = fprintf(f, "%s %s\n", MatrixMarketBanner, str); free(str); if (ret_code !=2 ) return MM_COULD_NOT_WRITE_FILE; else return 0; } int mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode) { FILE *f; int i; if (strcmp(fname, "stdout") == 0) f = stdout; else if ((f = fopen(fname, "w")) == NULL) return MM_COULD_NOT_WRITE_FILE; /* print banner followed by typecode */ fprintf(f, "%s ", MatrixMarketBanner); fprintf(f, "%s\n", mm_typecode_to_str(matcode)); /* print matrix sizes and nonzeros */ fprintf(f, "%d %d %d\n", M, N, nz); /* print values */ if (mm_is_pattern(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d\n", I[i], J[i]); else if (mm_is_real(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g\n", I[i], J[i], val[i]); else if (mm_is_complex(matcode)) for (i=0; i<nz; i++) fprintf(f, "%d %d %20.16g %20.16g\n", I[i], J[i], val[2*i], val[2*i+1]); else { if (f != stdout) fclose(f); return MM_UNSUPPORTED_TYPE; } if (f !=stdout) fclose(f); return 0; } /** * Create a new copy of a string s. mm_strdup() is a common routine, but * not part of ANSI C, so it is included here. Used by mm_typecode_to_str(). 
* */ char *mm_strdup(const char *s) { int len = strlen(s); char *s2 = (char *) malloc((len+1)*sizeof(char)); return strcpy(s2, s); } char *mm_typecode_to_str(MM_typecode matcode) { char buffer[MM_MAX_LINE_LENGTH]; char *types[4]; char *mm_strdup(const char *); //int error =0; /* check for MTX type */ if (mm_is_matrix(matcode)) types[0] = MM_MTX_STR; else return NULL; /* check for CRD or ARR matrix */ if (mm_is_sparse(matcode)) types[1] = MM_SPARSE_STR; else if (mm_is_dense(matcode)) types[1] = MM_DENSE_STR; else return NULL; /* check for element data type */ if (mm_is_real(matcode)) types[2] = MM_REAL_STR; else if (mm_is_complex(matcode)) types[2] = MM_COMPLEX_STR; else if (mm_is_pattern(matcode)) types[2] = MM_PATTERN_STR; else if (mm_is_integer(matcode)) types[2] = MM_INT_STR; else return NULL; /* check for symmetry type */ if (mm_is_general(matcode)) types[3] = MM_GENERAL_STR; else if (mm_is_symmetric(matcode)) types[3] = MM_SYMM_STR; else if (mm_is_hermitian(matcode)) types[3] = MM_HERM_STR; else if (mm_is_skew(matcode)) types[3] = MM_SKEW_STR; else return NULL; sprintf(buffer,"%s %s %s %s", types[0], types[1], types[2], types[3]); return mm_strdup(buffer); }
12,903
24.203125
85
c
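A minimal driver for the reader above, written as a sketch: it assumes a real, general, coordinate-format file at the placeholder path "matrix.mtx". mm_read_unsymmetric_sparse() does the banner/size/entry parsing in one call and hands back malloc'd arrays that the caller must free.

#include <cstdio>
#include <cstdlib>
#include "mmio.h"

int main() {
    int M, N, nz;
    int *I, *J;
    double *val;
    /* "matrix.mtx" is a placeholder path; the file must be real/general/coordinate */
    if (mm_read_unsymmetric_sparse("matrix.mtx", &M, &N, &nz, &val, &I, &J) != 0) {
        std::fprintf(stderr, "could not read matrix\n");
        return 1;
    }
    std::printf("%d x %d, %d nonzeros; first entry (%d, %d) = %g\n",
                M, N, nz, I[0], J[0], val[0]);  /* indices are already 0-based */
    std::free(I); std::free(J); std::free(val);
    return 0;
}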
cugraph-branch-23.08/thirdparty/mmio/mmio.h
cugraph-branch-23.08/thirdparty/mmio/mmio.h
/* * Matrix Market I/O library for ANSI C * * See http://math.nist.gov/MatrixMarket for details. * * */ #ifndef MM_IO_H #define MM_IO_H #include <stdio.h> /* for FILE, used in the prototypes below */ #define MM_MAX_LINE_LENGTH 1025 #define MatrixMarketBanner "%%MatrixMarket" #define MM_MAX_TOKEN_LENGTH 64 typedef char MM_typecode[4]; char *mm_typecode_to_str(MM_typecode matcode); int mm_read_banner(FILE *f, MM_typecode *matcode); int mm_read_mtx_crd_size(FILE *f, int *M, int *N, int *nz); int mm_read_mtx_array_size(FILE *f, int *M, int *N); int mm_write_banner(FILE *f, MM_typecode matcode); int mm_write_mtx_crd_size(FILE *f, int M, int N, int nz); int mm_write_mtx_array_size(FILE *f, int M, int N); /********************* MM_typecode query functions ***************************/ #define mm_is_matrix(typecode) ((typecode)[0]=='M') #define mm_is_sparse(typecode) ((typecode)[1]=='C') #define mm_is_coordinate(typecode)((typecode)[1]=='C') #define mm_is_dense(typecode) ((typecode)[1]=='A') #define mm_is_array(typecode) ((typecode)[1]=='A') #define mm_is_complex(typecode) ((typecode)[2]=='C') #define mm_is_real(typecode) ((typecode)[2]=='R') #define mm_is_pattern(typecode) ((typecode)[2]=='P') #define mm_is_integer(typecode) ((typecode)[2]=='I') #define mm_is_symmetric(typecode)((typecode)[3]=='S') #define mm_is_general(typecode) ((typecode)[3]=='G') #define mm_is_skew(typecode) ((typecode)[3]=='K') #define mm_is_hermitian(typecode)((typecode)[3]=='H') int mm_is_valid(MM_typecode matcode); /* too complex for a macro */ /********************* MM_typecode modify functions ***************************/ #define mm_set_matrix(typecode) ((*typecode)[0]='M') #define mm_set_coordinate(typecode) ((*typecode)[1]='C') #define mm_set_array(typecode) ((*typecode)[1]='A') #define mm_set_dense(typecode) mm_set_array(typecode) #define mm_set_sparse(typecode) mm_set_coordinate(typecode) #define mm_set_complex(typecode)((*typecode)[2]='C') #define mm_set_real(typecode) ((*typecode)[2]='R') #define mm_set_pattern(typecode)((*typecode)[2]='P') #define mm_set_integer(typecode)((*typecode)[2]='I') #define mm_set_symmetric(typecode)((*typecode)[3]='S') #define mm_set_general(typecode)((*typecode)[3]='G') #define mm_set_skew(typecode) ((*typecode)[3]='K') #define mm_set_hermitian(typecode)((*typecode)[3]='H') #define mm_clear_typecode(typecode) ((*typecode)[0]=(*typecode)[1]= \ (*typecode)[2]=' ',(*typecode)[3]='G') #define mm_initialize_typecode(typecode) mm_clear_typecode(typecode) /********************* Matrix Market error codes ***************************/ #define MM_COULD_NOT_READ_FILE 11 #define MM_PREMATURE_EOF 12 #define MM_NOT_MTX 13 #define MM_NO_HEADER 14 #define MM_UNSUPPORTED_TYPE 15 #define MM_LINE_TOO_LONG 16 #define MM_COULD_NOT_WRITE_FILE 17 /******************** Matrix Market internal definitions ******************** MM_matrix_typecode: 4-character sequence object sparse/ data storage dense type scheme string position: [0] [1] [2] [3] Matrix typecode: M(atrix) C(oord) R(eal) G(eneral) A(array) C(omplex) H(ermitian) P(attern) S(ymmetric) I(nteger) K(skew) ***********************************************************************/ #define MM_MTX_STR "matrix" #define MM_ARRAY_STR "array" #define MM_DENSE_STR "array" #define MM_COORDINATE_STR "coordinate" #define MM_SPARSE_STR "coordinate" #define MM_COMPLEX_STR "complex" #define MM_REAL_STR "real" #define MM_INT_STR "integer" #define MM_GENERAL_STR "general" #define MM_SYMM_STR "symmetric" #define MM_HERM_STR "hermitian" #define MM_SKEW_STR "skew-symmetric" #define MM_PATTERN_STR "pattern" /* high level routines */ int 
mm_write_mtx_crd(char fname[], int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_data(FILE *f, int M, int N, int nz, int I[], int J[], double val[], MM_typecode matcode); int mm_read_mtx_crd_entry(FILE *f, int *I, int *J, double *real, double *img, MM_typecode matcode); int mm_read_unsymmetric_sparse(const char *fname, int *M_, int *N_, int *nz_, double **val_, int **I_, int **J_); #endif
4,206
30.395522
80
h
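A small sketch of the typecode macros above: the mm_set_* macros take a pointer to the 4-character code, the mm_is_* queries take it by value, and mm_typecode_to_str() returns heap storage (via mm_strdup) that the caller should free.

#include <cstdio>
#include <cstdlib>
#include "mmio.h"

int main() {
    MM_typecode t;
    mm_initialize_typecode(&t);
    mm_set_matrix(&t);
    mm_set_coordinate(&t);
    mm_set_real(&t);
    mm_set_general(&t);         /* t is now "MCRG" */
    char *s = mm_typecode_to_str(t);
    std::printf("%s\n", s);     /* prints: matrix coordinate real general */
    std::free(s);
    return 0;
}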
finmag
finmag-master/dev/sandbox/cvode_parallel/llg/llg.c
#include "llg.h" int llg_rhs(Vec M, Vec H, Vec dM_dt, Vec alpha_v, double gamma, int do_precession, double char_freq) { PetscReal *m, *h, *dm_dt, *alpha; PetscReal mth0, mth1, mth2, a1, mm; int i,j,nlocal=0; VecGetArray(M, &m); VecGetArray(H, &h); VecGetArray(dM_dt, &dm_dt); VecGetArray(alpha_v, &alpha); VecGetLocalSize(M,&nlocal); if (do_precession) { for (i = 0; i < nlocal; i += 3) { j = i/3; a1 = -gamma / (1 + alpha[j] * alpha[j]); mth0 = a1 * (m[i + 1] * h[i + 2] - m[i + 2] * h[i + 1]); mth1 = a1 * (m[i + 2] * h[i] - m[i] * h[i + 2]); mth2 = a1 * (m[i] * h[i + 1] - m[i + 1] * h[i]); dm_dt[i] = mth0 + alpha[j] * (m[i + 1] * mth2 - m[i + 2] * mth1); dm_dt[i + 1] = mth1 + alpha[j] * (m[i + 2] * mth0 - m[i ] * mth2); dm_dt[i + 2] = mth2 + alpha[j] * (m[i] * mth1 - m[i + 1] * mth0); mm = m[i] * m[i] + m[i + 1] * m[i + 1] + m[i + 2] * m[i + 2]; dm_dt[i] += char_freq*(1-mm)*m[i]; dm_dt[i+1] += char_freq*(1-mm)*m[i+1]; dm_dt[i+2] += char_freq*(1-mm)*m[i+2]; } } else { for (i = 0; i < nlocal; i += 3) { j = i/3; a1 = -gamma / (1 + alpha[j] * alpha[j]); mth0 = a1 * (m[i + 1] * h[i + 2] - m[i + 2] * h[i + 1]); mth1 = a1 * (m[i + 2] * h[i ] - m[i] * h[i + 2]); mth2 = a1 * (m[i] * h[i + 1] - m[i + 1] * h[i]); dm_dt[i] = alpha[j] * (m[i + 1] * mth2 - m[i + 2] * mth1); dm_dt[i + 1] = alpha[j] * (m[i + 2] * mth0 - m[i ] * mth2); dm_dt[i + 2] = alpha[j] * (m[i] * mth1 - m[i + 1] * mth0); mm = m[i] * m[i] + m[i + 1] * m[i + 1] + m[i + 2] * m[i + 2]; dm_dt[i] += char_freq*(1-mm)*m[i]; dm_dt[i+1] += char_freq*(1-mm)*m[i+1]; dm_dt[i+2] += char_freq*(1-mm)*m[i+2]; } } VecRestoreArray(M, &m); VecRestoreArray(H, &h); VecRestoreArray(dM_dt, &dm_dt); VecRestoreArray(alpha_v, &alpha); return 0; }
2,188
30.724638
103
c
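For reference, each three-component block of the loop above evaluates the Landau-Lifshitz form of the LLG right-hand side plus a norm-restoring relaxation term; with a1 = -\gamma/(1+\alpha^2) as in the code and c the char_freq argument, the computed quantity is

\frac{d\mathbf{m}}{dt} = -\frac{\gamma}{1+\alpha^{2}}\left[\,\mathbf{m}\times\mathbf{H} + \alpha\,\mathbf{m}\times(\mathbf{m}\times\mathbf{H})\,\right] + c\,\bigl(1-\lvert\mathbf{m}\rvert^{2}\bigr)\,\mathbf{m}

The do_precession == FALSE branch drops the first (precession) term and keeps only the damping and relaxation terms.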
finmag
finmag-master/dev/sandbox/timeintegration/c-dolfin-compare/dmdt.c
#include <stdio.h> /* for printf used by DLOG */ #include <stdlib.h> /* for EXIT_SUCCESS / EXIT_FAILURE */ int dmdt(double alpha, double gamma, double c, int Mn, double* M, int Hn, double* H, int dMdtn, double* dMdt, int Pn, double* P); #define DEBUG 1 #ifdef DEBUG #define DLOG(...) printf(__VA_ARGS__) #else #define DLOG(...) /* nothing */ #endif int dmdt(double alpha, double gamma, double c, int Mn, double* M, int Hn, double* H, int dMdtn, double* dMdt, int Pn, double* P) { if ( Mn != Hn || Mn != dMdtn ) { DLOG("in '%s': ", __PRETTY_FUNCTION__); DLOG("Arrays don't have the same number of entries. "); DLOG("M[%d], H[%d], dMdt[%d].\n", Mn, Hn, dMdtn); return EXIT_FAILURE; } const int DIMENSIONS = 3; if ( Mn % DIMENSIONS != 0 ) { DLOG("in '%s': ", __PRETTY_FUNCTION__); DLOG("Can't split arrays into %d dimensions. ", DIMENSIONS); DLOG("M[%d].\n", Mn); return EXIT_FAILURE; } const int ENTRIES_PER_DIM = Mn / DIMENSIONS; /* The first ENTRIES_PER_DIM entries correspond to the x-dimension, the second ENTRIES_PER_DIM entries to the y-dimension and the last bunch to the z-dimension. There could have been two variables called y_offset and z_offset, but X, Y and Z make for nicer array indexing. */ const int X = 0; const int Y = ENTRIES_PER_DIM; const int Z = 2 * ENTRIES_PER_DIM; double p = gamma / (1 + alpha*alpha); /* precession factor of the LLG */ /* double q = gamma * alpha / (1 + alpha*alpha); */ /* damping explicit */ double q = alpha * p; /* marginally faster than line above */ for ( int i=0; i<ENTRIES_PER_DIM; i++ ) { double MM = M[X+i]*M[X+i] + M[Y+i]*M[Y+i] + M[Z+i]*M[Z+i]; dMdt[X+i] = - p * (M[Y+i]*H[Z+i] - M[Z+i]*H[Y+i]) - q * ( M[Y+i] * (M[X+i]*H[Y+i] - M[Y+i]*H[X+i]) - M[Z+i] * (M[Z+i]*H[X+i] - M[X+i]*H[Z+i])) - c * (MM - 1) * M[X+i]; dMdt[Y+i] = - p * (M[Z+i]*H[X+i] - M[X+i]*H[Z+i]) - q * ( M[Z+i] * (M[Y+i]*H[Z+i] - M[Z+i]*H[Y+i]) - M[X+i] * (M[X+i]*H[Y+i] - M[Y+i]*H[X+i])) - c * (MM - 1) * M[Y+i]; dMdt[Z+i] = - p * (M[X+i]*H[Y+i] - M[Y+i]*H[X+i]) - q * ( M[X+i] * (M[Z+i]*H[X+i] - M[X+i]*H[Z+i]) - M[Y+i] * (M[Y+i]*H[Z+i] - M[Z+i]*H[Y+i])) - c * (MM - 1) * M[Z+i]; } for ( int i=0; i<Pn; i++ ) { /* pin the magnetisation at the given points by setting dM/dt to 0. */ int node = (int) P[i]; /* was the C++-style cast int(P[i]), which is invalid in C */ dMdt[X+node] = 0; dMdt[Y+node] = 0; dMdt[Z+node] = 0; } return EXIT_SUCCESS; }
2,638
32.405063
78
c
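A hypothetical single-node driver for dmdt() above; the gamma and c magnitudes are illustrative placeholders, not values taken from the file. Note the component layout: all x entries first, then all y, then all z.

#include <cstdio>

int dmdt(double alpha, double gamma, double c, int Mn, double* M,
         int Hn, double* H, int dMdtn, double* dMdt, int Pn, double* P);

int main() {
    double M[3]    = {1.0, 0.0, 0.0};  /* one node: m along x (layout [mx, my, mz]) */
    double H[3]    = {0.0, 0.0, 1.0};  /* field along z */
    double dMdt[3] = {0.0, 0.0, 0.0};
    /* alpha = 0.02; gamma and c are placeholder magnitudes; no pinned nodes */
    dmdt(0.02, 2.21e5, 1e11, 3, M, 3, H, 3, dMdt, 0, nullptr);
    std::printf("dm/dt = (%g, %g, %g)\n", dMdt[0], dMdt[1], dMdt[2]);
    return 0;
}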
finmag
finmag-master/dev/sandbox/treecode/fast_sum.h
#ifndef FAST_SUM_H #define FAST_SUM_H typedef struct { double x,y,z; } Cartesian_xyz; struct octree_node { int num_children; int num_particle; int have_moment; int need_upadte_moment; int begin; int end; double x,y,z;//node x,y,z double rx,ry,rz; double radius_square; double ***moment; struct octree_node *children[8]; }; typedef struct { int N_source; //Number of the nodes with known charge density int N_target; //Number of the nodes to be evaluated double *charge_density; // the coefficients of the source double *weights; double *x_s; //the coordinates of source nodes double *x_t; //the coordinates of target nodes double *x_s_bak; //the coordinates of source nodes in the original order //double *x_s_tet;//the coordinates of source nodes used for tetrahedron correction int *index; int triangle_p; int tetrahedron_p; int triangle_num; double *t_normal;//store the normal of the triangles in the boundary int *triangle_nodes;//store the mapping between face and nodes double critical_sigma; struct octree_node *tree; int p; double mac_square; int num_limit; } fastsum_plan; fastsum_plan *create_plan(); void init_mesh(fastsum_plan *plan, double *x_t, double *t_normal, int *triangle_nodes, int *tetrahedron_nodes); void update_charge_density(fastsum_plan *plan,double *m); void fastsum_finalize(fastsum_plan *plan); void fastsum_exact(fastsum_plan *plan, double *phi); void fastsum(fastsum_plan *plan, double *phi); void build_tree(fastsum_plan *plan); void init_fastsum(fastsum_plan *plan, int N_target, int triangle_p, int tetrahedron_p, int triangle_num, int tetrahedron_num, int p, double mac, int num_limit); void compute_correction(fastsum_plan *plan, double *m, double *phi); #endif /* FAST_SUM_H */
1,939
25.216216
100
h
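The header gives no usage notes, so the following is a call sequence inferred purely from the declarations above; the argument values (quadrature orders, expansion order p, opening angle mac, leaf size) are placeholders. Treat it as a sketch, not the library's documented protocol.

#include "fast_sum.h"

/* Hypothetical workflow; all mesh arrays are assumed to be filled by the caller. */
void demag_potential_sketch(double *x_t, double *t_normal,
                            int *triangle_nodes, int *tetrahedron_nodes,
                            double *m, double *phi,
                            int N_target, int triangle_num, int tetrahedron_num) {
    fastsum_plan *plan = create_plan();
    /* placeholders: triangle/tetrahedron orders 1/1, expansion order p = 4,
       opening angle mac = 0.5, at most 100 particles per leaf */
    init_fastsum(plan, N_target, 1, 1, triangle_num, tetrahedron_num, 4, 0.5, 100);
    init_mesh(plan, x_t, t_normal, triangle_nodes, tetrahedron_nodes);
    build_tree(plan);
    update_charge_density(plan, m);
    fastsum(plan, phi);                /* fastsum_exact(plan, phi) is the O(N^2) reference */
    compute_correction(plan, m, phi);
    fastsum_finalize(plan);
}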
finmag
finmag-master/dev/sandbox/treecode_bem/fast_sum.h
#ifndef FAST_SUM_H #define FAST_SUM_H typedef struct { double x,y,z; } Cartesian_xyz; struct octree_node { int num_children; int num_particle; int have_moment; int need_upadte_moment; int begin; int end; double x,y,z;//node x,y,z double rx,ry,rz; double radius_square; double radius; double ***moment; struct octree_node *children[8]; }; typedef struct { int N_source; //Number of the nodes with known charge density int N_target; //Number of the nodes to be evaluated double *charge_density; // the coefficients of the source double *weights; double *x_s; //the coordinates of source nodes double *x_t; //the coordinates of target nodes double *x_s_tri; //a triangle as a source point, needed in the analytical correction int *x_s_ids; int triangle_num; double *t_normal;//store the normal of the triangles in the boundary int *triangle_nodes;//store the mapping between face and nodes double critical_sigma; struct octree_node *tree; int p; double mac_square; int num_limit; double *vert_bsa; double r_eps; int *id_n; // indices nodes double *b_m;//boundary matrix //int *id_tn; int *id_nn; int total_length_n; } fastsum_plan; fastsum_plan *create_plan(); void update_potential_u1(fastsum_plan *plan,double *u1); void fastsum_finalize(fastsum_plan *plan); void fastsum(fastsum_plan *plan, double *phi,double *u1); void build_tree(fastsum_plan *plan); void bulid_indices(fastsum_plan *plan); void init_fastsum(fastsum_plan *plan, int N_target, int triangle_num, int p, double mac, int num_limit); void init_mesh(fastsum_plan *plan, double *x_t, double *t_normal, int *triangle_nodes, double *vert_bsa); void compute_triangle_source_nodes(fastsum_plan *plan); void compute_source_nodes_weights(fastsum_plan *plan); double solid_angle_single(double *p, double *x1, double *x2, double *x3); void copy_B(fastsum_plan *plan, double *B, int n);//used for test void boundary_element(double *xp, double *x1, double *x2, double *x3, double *res); int get_total_length(fastsum_plan *plan); void print_tree(fastsum_plan *plan); #endif /* FAST_SUM_H */
2,266
25.057471
105
h
finmag
finmag-master/examples/cubic_anisotropy/oommf_reference/cubicanisotropy8.h
/* FILE: cubicanisotropy8.h -*-Mode: c++-*- * * Cubic Anisotropy, derived from Oxs_Energy class. * * This interface is a modification of the interface * /oommf/app/oxs/ext/cubicanisotropy.h * It is designed for handling higher orders of the * power series of the cubic anisotropy * * The required values are * -scalar 'K1' (for fourth order power) * -scalar 'K2' (for sixth order power), * -scalar 'K3' (for eighth order power), * -vector 'axis1' indicating first cubic anisotropy direction * -vector 'axis2' indicating second cubic anisotropy direction * (the third axis is assumed to be perpendicular to axis1 and axis2) * * Juergen Zimmermann * Computational Engineering and Design Group * (c) 2005 University of Southampton * * file created Wed May 11 2005 * * file updated Thu April 19 2007: * renaming issues Ced_UniaxialAnisotropy to Oxs_UniaxialAnisotropy4 */ #ifndef _OXS_CUBICANISOTROPY8 #define _OXS_CUBICANISOTROPY8 #include "nb.h" #include "threevector.h" #include "energy.h" #include "key.h" #include "simstate.h" #include "mesh.h" #include "meshvalue.h" #include "scalarfield.h" #include "vectorfield.h" /* End includes */ class Southampton_CubicAnisotropy8:public Oxs_Energy { private: Oxs_OwnedPointer<Oxs_ScalarField> K1_init; Oxs_OwnedPointer<Oxs_ScalarField> K2_init; Oxs_OwnedPointer<Oxs_ScalarField> K3_init; Oxs_OwnedPointer<Oxs_VectorField> axis1_init; Oxs_OwnedPointer<Oxs_VectorField> axis2_init; mutable OC_UINT4m mesh_id; mutable Oxs_MeshValue<OC_REAL8m> K1; mutable Oxs_MeshValue<OC_REAL8m> K2; mutable Oxs_MeshValue<OC_REAL8m> K3; mutable Oxs_MeshValue<ThreeVector> axis1; mutable Oxs_MeshValue<ThreeVector> axis2; /// K1, K2, K3, axis1 and axis2 are cached values filled by corresponding /// *_init members when a change in mesh is detected. protected: virtual void GetEnergy(const Oxs_SimState& state, Oxs_EnergyData& oed) const; public: virtual const char* ClassName() const; // ClassName() is /// automatically generated by the OXS_EXT_REGISTER macro. Southampton_CubicAnisotropy8(const char* name, // Child instance id Oxs_Director* newdtr, // App director const char* argstr); // MIF input block parameters virtual ~Southampton_CubicAnisotropy8() {} }; #endif // _OXS_CUBICANISOTROPY8
2,332
30.106667
75
h
finmag
finmag-master/native/src/finmag_includes.h
/** * FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations * Copyright (C) 2012 University of Southampton * Do not distribute * * CONTACT: h.fangohr@soton.ac.uk * * AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko (d.chernyshenko@soton.ac.uk) */ #ifndef __FINMAG_INCLUDES_H #define __FINMAG_INCLUDES_H // Standard C/C++ includes #include <cstring> #include <cmath> #include <cstdlib> // OpenMP #include <omp.h> // Boost includes #ifndef IDE_ERROR_BLOCK #include <boost/mpl/map.hpp> #include <boost/mpl/transform.hpp> #include <boost/mpl/range_c.hpp> #include <boost/mpl/list.hpp> #include <boost/python.hpp> #include <boost/typeof/typeof.hpp> #include <boost/thread.hpp> #endif // CVODE/Sundials #include <cvode/cvode.h> // Dolfin #include <dolfin.h> #endif
802
20.131579
91
h
finmag
finmag-master/native/src/cvode/llg.c
#include "llg.h" int llg_rhs(Vec M, Vec H, Vec dM_dt, Vec alpha_v, double gamma, int do_precession, double char_freq) { PetscReal *m, *h, *dm_dt, *alpha; PetscReal mth0, mth1, mth2, a1, mm; int i,j,nlocal=0; VecGetArray(M, &m); VecGetArray(H, &h); VecGetArray(dM_dt, &dm_dt); VecGetArray(alpha_v, &alpha); VecGetLocalSize(M,&nlocal); if (do_precession) { for (i = 0; i < nlocal; i += 3) { j = i/3; a1 = -gamma / (1 + alpha[j] * alpha[j]); mth0 = a1 * (m[i + 1] * h[i + 2] - m[i + 2] * h[i + 1]); mth1 = a1 * (m[i + 2] * h[i] - m[i] * h[i + 2]); mth2 = a1 * (m[i] * h[i + 1] - m[i + 1] * h[i]); dm_dt[i] = mth0 + alpha[j] * (m[i + 1] * mth2 - m[i + 2] * mth1); dm_dt[i + 1] = mth1 + alpha[j] * (m[i + 2] * mth0 - m[i ] * mth2); dm_dt[i + 2] = mth2 + alpha[j] * (m[i] * mth1 - m[i + 1] * mth0); mm = m[i] * m[i] + m[i + 1] * m[i + 1] + m[i + 2] * m[i + 2]; dm_dt[i] += char_freq*(1-mm)*m[i]; dm_dt[i+1] += char_freq*(1-mm)*m[i+1]; dm_dt[i+2] += char_freq*(1-mm)*m[i+2]; } } else { for (i = 0; i < nlocal; i += 3) { j = i/3; a1 = -gamma / (1 + alpha[j] * alpha[j]); mth0 = a1 * (m[i + 1] * h[i + 2] - m[i + 2] * h[i + 1]); mth1 = a1 * (m[i + 2] * h[i ] - m[i] * h[i + 2]); mth2 = a1 * (m[i] * h[i + 1] - m[i + 1] * h[i]); dm_dt[i] = alpha[j] * (m[i + 1] * mth2 - m[i + 2] * mth1); dm_dt[i + 1] = alpha[j] * (m[i + 2] * mth0 - m[i ] * mth2); dm_dt[i + 2] = alpha[j] * (m[i] * mth1 - m[i + 1] * mth0); mm = m[i] * m[i] + m[i + 1] * m[i + 1] + m[i + 2] * m[i + 2]; dm_dt[i] += char_freq*(1-mm)*m[i]; dm_dt[i+1] += char_freq*(1-mm)*m[i+1]; dm_dt[i+2] += char_freq*(1-mm)*m[i+2]; } } VecRestoreArray(M, &m); VecRestoreArray(H, &h); VecRestoreArray(dM_dt, &dm_dt); VecRestoreArray(alpha_v, &alpha); return 0; }
2,188
30.724638
103
c
finmag
finmag-master/native/src/fast_sum_lib/fast_sum.h
#ifndef FAST_SUM_H #define FAST_SUM_H typedef struct { double x,y,z; } Cartesian_xyz; struct octree_node { int num_children; int num_particle; int have_moment; int need_upadte_moment; int begin; int end; double x,y,z;//node x,y,z double rx,ry,rz; double radius_square; double ***moment; struct octree_node *children[8]; }; typedef struct { int N_source; //Number of the nodes with known charge density int N_target; //Number of the nodes to be evaluated double *charge_density; // the coefficients of the source double *weights; double *x_s; //the coordinates of source nodes double *x_t; //the coordinates of target nodes double *x_s_bak; //the coordinates of source nodes in the original order //double *x_s_tet;//the coordinates of source nodes used for tetrahedron correction int *index; int triangle_p; int tetrahedron_p; int triangle_num; double *t_normal;//store the normal of the triangles in the boundary int *triangle_nodes;//store the mapping between face and nodes int tetrahedron_num; int *tetrahedron_nodes;//store the mapping between tetrahedron and nodes double *tetrahedron_correction;//store the correction coefficients double *tet_charge_density;//used for correction too double critical_sigma; struct octree_node *tree; int p; double mac_square; int num_limit; } fastsum_plan; fastsum_plan *create_plan(void); void init_mesh(fastsum_plan *plan, double *x_t, double *t_normal, int *triangle_nodes, int *tetrahedron_nodes); void update_charge_density(fastsum_plan *plan,double *m); void fastsum_finalize(fastsum_plan *plan); void fastsum_exact(fastsum_plan *plan, double *phi); void fastsum(fastsum_plan *plan, double *phi); void build_tree(fastsum_plan *plan); void init_fastsum(fastsum_plan *plan, int N_target, int triangle_p, int tetrahedron_p, int triangle_num, int tetrahedron_num, int p, double mac, int num_limit); void compute_correction(fastsum_plan *plan, double *m, double *phi); void compute_source_nodes_weights(fastsum_plan *plan); #endif /* FAST_SUM_H */
2,231
26.9
100
h
finmag
finmag-master/native/src/llb/mt19937.h
//#include <boost/random.hpp> //#include <boost/random/normal_distribution.hpp> #include "util/np_array.h" namespace finmag { namespace llb { /* class RandomMT19937 { private: boost::random::mt19937 engine; boost::variate_generator<boost::mt19937&, boost::normal_distribution<> > generator; public: RandomMT19937():engine(), generator(engine, boost::normal_distribution<>(0.0, 1.0)) {} void seed(unsigned int sd) { engine.seed(sd); } void gaussian_random_vec(double *x, int n, double dev){ for (int i = 0; i < n; i++) { x[i]=dev*generator(); } } }; */ #define MT19937_N 624 class RandomMT19937 { int random_index; unsigned int MT[MT19937_N]; private: double ltqnorm(void); public: RandomMT19937():random_index(0){}; double random(void); void initial_random(unsigned int seed); void gaussian_random_vec(double *x, int n, double dev); void gaussian_random_np(const np_array<double> &pa); }; }}
1,199
20.428571
89
h
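A short sketch of the class above; judging by the commented-out Boost version it replaces, the 'dev' argument of gaussian_random_vec() scales zero-mean unit-variance normal samples, i.e. it acts as the standard deviation (an inference from the dead code, not a documented contract).

#include <cstdio>
#include "mt19937.h"

int main() {
    finmag::llb::RandomMT19937 rng;
    rng.initial_random(42u);                 /* seed the Mersenne Twister state */
    double noise[6];
    rng.gaussian_random_vec(noise, 6, 0.1);  /* presumably N(0, 0.1^2) samples */
    for (int i = 0; i < 6; ++i) std::printf("%g\n", noise[i]);
    return 0;
}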
finmag
finmag-master/native/src/sundials/numpy_malloc.h
/** * FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations * Copyright (C) 2012 University of Southampton * Do not distribute * * CONTACT: h.fangohr@soton.ac.uk * * AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko (d.chernyshenko@soton.ac.uk) */ #ifndef __FINMAG_UTIL_SUNDIALS_NUMPY_MALLOC_H #define __FINMAG_UTIL_SUNDIALS_NUMPY_MALLOC_H #include "util/np_array.h" #include <nvector/nvector_serial.h> namespace finmag { namespace sundials { void register_numpy_malloc(); np_array<double> nvector_to_array(N_Vector p); bp::object nvector_to_array_object(N_Vector p); extern "C" void * numpy_malloc(size_t len, size_t el_size); extern "C" void numpy_free(void *ptr); /* Wrapper class for Sundials NVectorSerial */ class array_nvector { public: array_nvector(const np_array<double> &data); N_Vector ptr() { return vec; } ~array_nvector() { if (vec) { N_VDestroy_Serial(vec); vec = 0; } } private: // Disallow copy constructor & assignment // Use auto_ptr/unique_ptr/shared_ptr for shared nvector_serial objects array_nvector(const array_nvector&); void operator=(const array_nvector&); N_Vector vec; // store a reference to the original array to prevent array memory from being freed np_array<double> arr; }; }} #endif
1,450
26.377358
91
h
finmag
finmag-master/native/src/sundials/nvector_custom_malloc.h
/** * FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations * Copyright (C) 2012 University of Southampton * Do not distribute * * CONTACT: h.fangohr@soton.ac.uk * * AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko (d.chernyshenko@soton.ac.uk) */ #ifndef __FINMAG_UTIL_SUNDIALS_NVECTOR_CUSTOM_MALLOC_H #define __FINMAG_UTIL_SUNDIALS_NVECTOR_CUSTOM_MALLOC_H #ifdef __cplusplus extern "C" { #endif #include <nvector/nvector_serial.h> extern void set_nvector_custom_allocators( void * (*data_malloc_func)(size_t, size_t), void (*data_free_func)(void *), N_VectorContent_Serial (*nvec_malloc_func)(), void (*nvec_free_func)(N_VectorContent_Serial) ); #ifdef __cplusplus } #endif #endif
784
22.088235
91
h
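A sketch of wiring the hook above to plain malloc/free; the real intent, per numpy_malloc.h earlier in this directory, is to route data buffers through NumPy. struct _N_VectorContent_Serial is the SUNDIALS struct behind the N_VectorContent_Serial pointer typedef, so sizing the content allocation this way is an assumption about the SUNDIALS headers in use.

#include <stdlib.h>
#include "nvector_custom_malloc.h"

static void *plain_data_malloc(size_t len, size_t el_size) { return malloc(len * el_size); }
static void plain_data_free(void *ptr) { free(ptr); }
static N_VectorContent_Serial plain_content_malloc(void) {
    /* one content struct per nvector */
    return (N_VectorContent_Serial) malloc(sizeof(struct _N_VectorContent_Serial));
}
static void plain_content_free(N_VectorContent_Serial p) { free(p); }

void install_plain_allocators(void) {
    set_nvector_custom_allocators(plain_data_malloc, plain_data_free,
                                  plain_content_malloc, plain_content_free);
}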
finmag
finmag-master/native/src/sundials/nvec_serial/nvector_custom_malloc_impl.h
/** * FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations * Copyright (C) 2012 University of Southampton * Do not distribute * * CONTACT: h.fangohr@soton.ac.uk * * AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko (d.chernyshenko@soton.ac.uk) */ #ifndef __FINMAG_UTIL_SUNDIALS_NVECTOR_CUSTOM_MALLOC_IMPL_H #define __FINMAG_UTIL_SUNDIALS_NVECTOR_CUSTOM_MALLOC_IMPL_H #include <stdlib.h> #include "../nvector_custom_malloc.h" #ifdef __cplusplus extern "C" { #endif static void * (*nvector_custom_data_malloc)(size_t len, size_t el_size); static void (*nvector_custom_data_free)(void *ptr); // TODO: rename nvector_custom_nvec_malloc -> nvector_custom_content_malloc static N_VectorContent_Serial (*nvector_custom_nvec_malloc)(); static void (*nvector_custom_nvec_free)(N_VectorContent_Serial ptr); void set_nvector_custom_allocators( void * (*data_malloc_func)(size_t, size_t), void (*data_free_func)(void *), N_VectorContent_Serial (*nvec_malloc_func)(), void (*nvec_free_func)(N_VectorContent_Serial) ) { nvector_custom_data_malloc = data_malloc_func; nvector_custom_data_free = data_free_func; nvector_custom_nvec_malloc = nvec_malloc_func; nvector_custom_nvec_free = nvec_free_func; } #ifdef __cplusplus } #endif #endif
1,343
27.595745
91
h
finmag
finmag-master/native/src/sundials/nvec_serial/sundials_math.c
/* * ----------------------------------------------------------------- * $Revision: 1.1 $ * $Date: 2006/07/05 15:32:38 $ * ----------------------------------------------------------------- * Programmer(s): Scott D. Cohen, Alan C. Hindmarsh and * Aaron Collier @ LLNL * ----------------------------------------------------------------- * Copyright (c) 2002, The Regents of the University of California. * Produced at the Lawrence Livermore National Laboratory. * All rights reserved. * For details, see the LICENSE file. * ----------------------------------------------------------------- * This is the implementation file for a simple C-language math * library. * ----------------------------------------------------------------- */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sundials/sundials_math.h> #define ZERO RCONST(0.0) #define ONE RCONST(1.0) realtype RPowerI(realtype base, int exponent) { int i, expt; realtype prod; prod = ONE; expt = abs(exponent); for(i = 1; i <= expt; i++) prod *= base; if (exponent < 0) prod = ONE/prod; return(prod); } realtype RPowerR(realtype base, realtype exponent) { if (base <= ZERO) return(ZERO); #if defined(SUNDIALS_USE_GENERIC_MATH) return((realtype) pow((double) base, (double) exponent)); #elif defined(SUNDIALS_DOUBLE_PRECISION) return(pow(base, exponent)); #elif defined(SUNDIALS_SINGLE_PRECISION) return(powf(base, exponent)); #elif defined(SUNDIALS_EXTENDED_PRECISION) return(powl(base, exponent)); #endif } realtype RSqrt(realtype x) { if (x <= ZERO) return(ZERO); #if defined(SUNDIALS_USE_GENERIC_MATH) return((realtype) sqrt((double) x)); #elif defined(SUNDIALS_DOUBLE_PRECISION) return(sqrt(x)); #elif defined(SUNDIALS_SINGLE_PRECISION) return(sqrtf(x)); #elif defined(SUNDIALS_EXTENDED_PRECISION) return(sqrtl(x)); #endif } realtype RAbs(realtype x) { #if defined(SUNDIALS_USE_GENERIC_MATH) return((realtype) fabs((double) x)); #elif defined(SUNDIALS_DOUBLE_PRECISION) return(fabs(x)); #elif defined(SUNDIALS_SINGLE_PRECISION) return(fabsf(x)); #elif defined(SUNDIALS_EXTENDED_PRECISION) return(fabsl(x)); #endif } realtype RExp(realtype x) { #if defined(SUNDIALS_USE_GENERIC_MATH) return((realtype) exp((double) x)); #elif defined(SUNDIALS_DOUBLE_PRECISION) return(exp(x)); #elif defined(SUNDIALS_SINGLE_PRECISION) return(expf(x)); #elif defined(SUNDIALS_EXTENDED_PRECISION) return(expl(x)); #endif }
2,474
25.052632
68
c
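Worked check of RPowerI(): for RPowerI(2.0, -3) the loop accumulates prod = 2*2*2 = 8 over expt = |-3| = 3 iterations, then the negative exponent flips the result to 1/8. A tiny assertion sketch, assuming a double-precision realtype build:

#include <cassert>
#include <sundials/sundials_math.h>

int main() {
    assert(RPowerI(2.0, 10) == 1024.0);  /* exact in binary floating point */
    assert(RPowerI(2.0, -3) == 0.125);   /* negative exponent -> reciprocal */
    assert(RSqrt(-4.0) == 0.0);          /* non-positive inputs are clamped to zero */
    return 0;
}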
finmag
finmag-master/native/src/treecode_bem/common.h
#include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> typedef struct { double x,y,z; } Cartesian_xyz; struct octree_node { int num_children; int num_particle; int have_moment; int need_upadte_moment; int begin; int end; double x,y,z;//node x,y,z double rx,ry,rz; double radius_square; double radius; double ***moment; double *mom; struct octree_node *children[8]; }; extern inline double pow2(double x); extern inline double det2(double *dx, double *dy, double *dz); double **alloc_2d_double(int ndim1, int ndim2); void free_2d_double(double **p); double ***alloc_3d_double(int ndim1, int ndim2, int ndim3); void free_3d_double(double ***p, int ndim1); typedef struct { int N_source; //Number of the nodes with known charge density int N_target; //Number of the nodes to be evaluated double *charge_density; // the coefficients of the source double *weights; double *x_s; //the coordinates of source nodes double *x_t; //the coordinates of target nodes int *x_s_ids; int triangle_num; double *t_normal;//store the normal of the triangles in the boundary int *triangle_nodes;//store the mapping between face and nodes double critical_sigma; struct octree_node *tree; int p; double mac; double mac_square; int num_limit; double *vert_bsa; double r_eps; double r_eps_factor; double r_eps_squre; int *id_n; // indices nodes double *b_m;//boundary matrix int *id_nn; int total_length_n; } fastsum_plan; void compute_coefficient(double ***a, double dx, double dy, double dz, int p); void compute_moment(fastsum_plan *plan, struct octree_node *tree, double ***moment, double x, double y, double z); void reset_moment(fastsum_plan *plan, struct octree_node *tree); fastsum_plan *create_plan(void); void update_potential_u1(fastsum_plan *plan,double *u1); void fastsum_finalize(fastsum_plan *plan); void init_fastsum(fastsum_plan *plan, int N_target, int triangle_num, int p, double mac, int num_limit, double correct_factor); void init_mesh(fastsum_plan *plan, double *x_t, double *t_normal, int *triangle_nodes, double *vert_bsa); void build_tree(fastsum_plan *plan); void bulid_indices_I(fastsum_plan *plan); void bulid_indices_II(fastsum_plan *plan); void fast_sum_I(fastsum_plan *plan, double *phi,double *u1); void fast_sum_II(fastsum_plan *plan, double *phi,double *u1); void compute_source_nodes_weights(fastsum_plan *plan); void direct_sum_I(fastsum_plan *plan, double *phi, double *u1); double solid_angle_single(double *p, double *x1, double *x2, double *x3); void boundary_element(double *xp, double *x1, double *x2, double *x3, double *res, double *T); void build_matrix_T(double *x_t, int *tri_nodes, double *bm, double *T, int n_node, int n_face); int get_total_length(fastsum_plan *plan); void compute_coefficient_directly(double *a, double x, double y, double z, int p); void compute_moment_directly(fastsum_plan *plan, struct octree_node *tree, double *moment, double x, double y, double z);
3,108
27.787037
127
h
finmag
finmag-master/native/src/treecode_bem/test.c
#include "common.h" #include <assert.h> //gcc test.c common.c -lm static int ccc[35]={ 1, 1, 2, 6, 24, 1, 1, 2, 6, 2, 2, 4, 6, 6, 24, 1, 1, 2, 6, 1, 1, 2, 2, 2, 6, 2, 2, 4, 2, 2, 4, 6, 6, 6, 24 }; void test_compute_coefficient_directly(){ double a[35]; //computed using Mathematica double expected[35]={ 0.4800153607373193, 0.14378340298583298, 0.018603666107702595, -0.10465663185328682, -0.2580203559525883, 0.13272314121769196, 0.11926733888225773, 0.08688209195428556, -0.037675500984807146, -0.0005096894824028226, 0.0654962728884902, 0.14649121265269124, -0.12303011561409782, -0.09308064949187649, -0.23706584465824798, 0.12166287944955098, 0.10932839397540293, 0.07964191762476179, -0.03453587590273988, 0.10091851751575653, 0.1511452451272851, 0.20065187628560183, 0.055419923213337864, 0.16658481317792173, -0.07876054957004934, -0.01809397662529978, 0.03916035896479662, 0.11152914329989704, 0.036148023659812255, 0.13075615047668365, 0.0905746320055567, -0.13506184083809963, -0.13204893727518185, -0.12189132671555247, -0.2021037753054538}; double eps=1e-16; compute_coefficient_directly_debug(a,1.1,1.2,1.3,4); int i; for(i=0;i<35;i++){ printf("a[%d]=%0.15g, diff=%g\n",i,a[i],a[i]-expected[i]); assert(fabs(a[i]-expected[i])<eps); } } void test_directly_potential(int p){ double a[35]; double moment[35]; double x[3]={2,0.2,0.3}; double y[3]={0.12,0.13,0.14}; double charge_density=1000; double dx=y[0]; double dy=y[1]; double dz=y[2]; double tmp_x, tmp_y, tmp_z, R; double res_direct,res=0; int i,j,k,index=0; for(i=0;i<35;i++){ a[i]=0; moment[i]=0; } tmp_x = 1.0; for (i = 0; i < 5; i++) { tmp_y = 1.0; for (j = 0; j < 5 - i ; j++) { tmp_z = 1.0; for (k = 0; k < 5 - i - j; k++) { if (i+j+k<p+1){ moment[index] += charge_density *tmp_x * tmp_y * tmp_z; } index++; tmp_z *= dz; } tmp_y *= dy; } tmp_x *= dx; } compute_coefficient_directly_debug(a,x[0],x[1],x[2],p+1); for(i=0;i<35;i++){ res+=moment[i]*a[i]/ccc[i]; } /* * compute directly */ dx = x[0] - y[0]; dy = x[1] - y[1]; dz = x[2] - y[2]; R = dx * dx + dy * dy + dz*dz; res_direct = charge_density / sqrt(R); printf("exact result: %g fast sum: %g and p=%d rel_error=%g\n",res_direct,res,p,(res-res_direct)/res); } void test_single_layer_potential(int p){ double ***a = alloc_3d_double(p + 1, p + 1, p + 1); double ***moment = alloc_3d_double(p + 1, p + 1, p + 1); double x[3]={2,0.2,0.3}; double y[3]={0.12,0.13,0.14}; double charge_density=1000; double dx=y[0]; double dy=y[1]; double dz=y[2]; double tmp_x, tmp_y, tmp_z, R; double res_direct,res=0; int i,j,k; for (i = 0; i < p + 1; i++) { for (j = 0; j < p - i + 1; j++) { for (k = 0; k < p - i - j + 1; k++) { moment[i][j][k] = 0; } } } tmp_x = 1.0; for (i = 0; i < p + 1; i++) { tmp_y = 1.0; for (j = 0; j < p - i + 1; j++) { tmp_z = 1.0; for (k = 0; k < p - i - j + 1; k++) { moment[i][j][k] += charge_density *tmp_x * tmp_y * tmp_z; tmp_z *= dz; } tmp_y *= dy; } tmp_x *= dx; } compute_coefficient(a,x[0],x[1],x[2],p); for (i = 0; i < p + 1; i++) { for (j = 0; j < p - i + 1; j++) { for (k = 0; k < p - i - j + 1; k++) { res += a[i][j][k] * moment[i][j][k]; } } } /* * compute directly */ dx = x[0] - y[0]; dy = x[1] - y[1]; dz = x[2] - y[2]; R = dx * dx + dy * dy + dz*dz; res_direct = charge_density / sqrt(R); printf("exact result: %g fast sum: %g and p=%d rel_error=%g\n",res_direct,res,p,(res-res_direct)/res); } void test_double_layer_potential_I(int p){ /* * suppose yc=(0,0,0), triangle area A = 1, normal of triangle = norm(1,2,3) * the middle point of the triangle is y=(0.12,0.13,0.14) and the charge density 
equals 1000. * the observing point is x = (2,0.2,0.3) */ double ***a = alloc_3d_double(p + 1, p + 1, p + 1); double ***moment = alloc_3d_double(p + 1, p + 1, p + 1); double x[3]={2,0.2,0.3}; double y[3]={0.12,0.13,0.14}; double n[3]={1,2,3}; double charge_density=1000; double dx=y[0]; double dy=y[1]; double dz=y[2]; double tmp_x, tmp_y, tmp_z, R; double res_direct,res=0; int i,j,k; vector_unit(n,n); for (i = 0; i < p + 1; i++) { for (j = 0; j < p - i + 1; j++) { for (k = 0; k < p - i - j + 1; k++) { moment[i][j][k] = 0; } } } tmp_x = 1.0; for (i = 0; i < p + 1; i++) { tmp_y = 1.0; for (j = 0; j < p - i + 1; j++) { tmp_z = 1.0; for (k = 0; k < p - i - j + 1; k++) { moment[i][j][k] += charge_density * ( i * tmp_x * tmp_y * tmp_z / dx * n[0] + j * tmp_x * tmp_y * tmp_z / dy * n[1] + k * tmp_x * tmp_y * tmp_z / dz * n[2]); tmp_z *= dz; } tmp_y *= dy; } tmp_x *= dx; } compute_coefficient(a,x[0],x[1],x[2],p); for (i = 0; i < p + 1; i++) { for (j = 0; j < p - i + 1; j++) { for (k = 0; k < p - i - j + 1; k++) { res += a[i][j][k] * moment[i][j][k]; } } } /* * compute directly */ dx = x[0] - y[0]; dy = x[1] - y[1]; dz = x[2] - y[2]; R = dx * dx + dy * dy + dz*dz; dx *= n[0]; dy *= n[1]; dz *= n[2]; res_direct = charge_density*(dx + dy + dz) / (R*sqrt(R)); printf("exact result: %g fast sum: %g and p=%d rel_error=%g\n",res_direct,res,p,(res-res_direct)/res); } int main() { int i; printf("Test single_layer_potential :\n"); for (i=1;i<8;i++){ test_single_layer_potential(i); } printf("Test double_layer_potential_I :\n"); for (i=1;i<8;i++){ test_double_layer_potential_I(i); } test_compute_coefficient_directly(); printf("Test direct method :\n"); for (i=1;i<5;i++){ test_directly_potential(i); } return 0; }
6,494
22.031915
110
c
finmag
finmag-master/native/src/treecode_bem/treecode_bem_I.c
#include "common.h" void bulid_indices_single_I(fastsum_plan *plan, struct octree_node *tree, int index, int *in, double *value, int compute_bm) { double R; int i, j; double *p0, *p1, *p2, *p3; double omega[3]; int k1, k2, k3; double T[3]={0,0,0}; R = pow2(plan->x_t[3 * index] - tree->x) + pow2(plan->x_t[3 * index + 1] - tree->y) + pow2(plan->x_t[3 * index + 2] - tree->z); if (plan->mac_square * R > tree->radius_square) { return; } if (tree->num_children > 0) { for (i = 0; i < tree->num_children; i++) { bulid_indices_single_I(plan, tree->children[i], index, in, value, compute_bm); } return; } else { for (i = tree->begin; i < tree->end; i++) { j = plan->x_s_ids[i]; p0 = &plan->x_t[3 * index]; k1 = plan->triangle_nodes[3 * j]; p1 = &plan->x_t[3 * k1]; in[k1] = 1; k2 = plan->triangle_nodes[3 * j + 1]; p2 = &plan->x_t[3 * k2]; in[k2] = 1; k3 = plan->triangle_nodes[3 * j + 2]; p3 = &plan->x_t[3 * k3]; in[k3] = 1; if (compute_bm > 0) { boundary_element(p0, p1, p2, p3, omega, T); value[k1] += omega[0]; value[k2] += omega[1]; value[k3] += omega[2]; } } return; } } void bulid_indices_I(fastsum_plan *plan) { int i, j; int *indices_n = malloc(plan->N_target * sizeof ( int)); double *values = malloc(plan->N_target * sizeof ( double)); int tmp_length_n = 0; int total_length_n = 0; for (i = 0; i < plan->N_target; i++) { indices_n[i] = 0; values[i] = 0; } for (i = 0; i < plan->N_target; i++) { bulid_indices_single_I(plan, plan->tree, i, indices_n, values, 0); for (j = 0; j < plan->N_target; j++) { if (indices_n[j] > 0) { total_length_n++; indices_n[j] = 0; } } } plan->total_length_n=total_length_n; plan->id_n = malloc(total_length_n * sizeof ( int)); plan->b_m = malloc(total_length_n * sizeof ( double)); for (i = 0; i < total_length_n; i++) { plan->id_n[i] = 0; plan->b_m[i] = 0; } total_length_n = 0; for (i = 0; i < plan->N_target; i++) { bulid_indices_single_I(plan, plan->tree, i, indices_n, values, 1); tmp_length_n = 0; for (j = 0; j < plan->N_target; j++) { if (indices_n[j] > 0) { plan->id_n[total_length_n] = j; plan->b_m[total_length_n] = values[j]; total_length_n++; tmp_length_n++; indices_n[j] = 0; values[j] = 0; } } plan->id_nn[i] = tmp_length_n; } free(indices_n); free(values); } inline double direct_compute_potential_leaf_I(fastsum_plan *plan, struct octree_node *tree, int index){ int i,k; double res=0; double dx,dy,dz,R; for (i = tree->begin; i < tree->end; i++) { dx = plan->x_t[3 * index]-plan->x_s[3 * i]; dy = plan->x_t[3 * index + 1]-plan->x_s[3 * i + 1]; dz = plan->x_t[3 * index + 2]-plan->x_s[3 * i + 2]; R = dx * dx + dy * dy + dz * dz; k = plan->x_s_ids[i]; dx *= plan->t_normal[3 * k]; dy *= plan->t_normal[3 * k + 1]; dz *= plan->t_normal[3 * k + 2]; res += plan->charge_density[k]*(dx + dy + dz) / (R*sqrt(R)); } return res; } double compute_potential_single_target_I(fastsum_plan *plan, struct octree_node *tree, int index) { double R; int i; double res = 0; double dx, dy, dz; double a[35]; R = pow2(plan->x_t[3 * index] - tree->x) + pow2(plan->x_t[3 * index + 1] - tree->y) + pow2(plan->x_t[3 * index + 2] - tree->z); if (plan->mac_square * R > tree->radius_square) { if (tree->num_particle<10){ res=direct_compute_potential_leaf_I(plan,tree,index); return res; } if (!tree->have_moment) { tree->mom = (double *)malloc(35 * sizeof (double)); tree->have_moment = 1; tree->need_upadte_moment = 1; } if (tree->need_upadte_moment) { compute_moment_directly(plan, tree, tree->mom, tree->x, tree->y, tree->z); tree->need_upadte_moment = 0; } dx = plan->x_t[3 * index] - tree->x; dy = plan->x_t[3 * index + 
1] - tree->y; dz = plan->x_t[3 * index + 2] - tree->z; compute_coefficient_directly(a, dx, dy, dz, plan->p); for(i=0;i<35;i++){ res+=tree->mom[i]*a[i]; } return res; } else { if (tree->num_children > 0) { for (i = 0; i < tree->num_children; i++) { res += compute_potential_single_target_I(plan, tree->children[i], index); } return res; } else { //in this simplified version, we don't compute direct interaction return 0; } } } void fast_sum_I(fastsum_plan *plan, double *phi, double *u1) { int i, j, k; if (plan->mac > 0) { for (j = 0; j < plan->N_target; j++) { phi[j] = compute_potential_single_target_I(plan, plan->tree, j); } } int total_j = 0; for (i = 0; i < plan->N_target; i++) { for (j = 0; j < plan->id_nn[i]; j++) { k = plan->id_n[total_j]; phi[i] += plan->b_m[total_j] * u1[k]; total_j++; } phi[i] += plan->vert_bsa[i] * u1[i]; } reset_moment(plan,plan->tree); }
5,724
20.934866
103
c
finmag
finmag-master/native/src/treecode_bem/treecode_bem_II.c
#include "common.h" void bulid_indices_single_II(fastsum_plan *plan, struct octree_node *tree, int index, int *in, double *value, int compute_bm) { int i, j; double r,dx, dy, dz; double *p0, *p1, *p2, *p3; double omega[3]; int k1, k2, k3; double T[3]={0,0,0}; r = pow2(plan->x_t[3 * index] - tree->x) + pow2(plan->x_t[3 * index + 1] - tree->y) + pow2(plan->x_t[3 * index + 2] - tree->z); r = sqrt(r); if (r > tree->radius + plan->r_eps) { return; } if (tree->num_children > 0) { for (i = 0; i < tree->num_children; i++) { bulid_indices_single_II(plan, tree->children[i], index, in, value, compute_bm); } return; } else { for (i = tree->begin; i < tree->end; i++) { dx = plan->x_s[3 * i] - plan->x_t[3 * index]; dy = plan->x_s[3 * i + 1] - plan->x_t[3 * index + 1]; dz = plan->x_s[3 * i + 2] - plan->x_t[3 * index + 2]; r = sqrt(dx * dx + dy * dy + dz * dz); if (r <= plan->r_eps) { j = plan->x_s_ids[i]; p0 = &plan->x_t[3 * index]; k1 = plan->triangle_nodes[3 * j]; p1 = &plan->x_t[3 * k1]; in[k1] = 1; k2 = plan->triangle_nodes[3 * j + 1]; p2 = &plan->x_t[3 * k2]; in[k2] = 1; k3 = plan->triangle_nodes[3 * j + 2]; p3 = &plan->x_t[3 * k3]; in[k3] = 1; if (compute_bm > 0) { boundary_element(p0, p1, p2, p3, omega, T); value[k1] += omega[0]; value[k2] += omega[1]; value[k3] += omega[2]; } } } return; } } void bulid_indices_II(fastsum_plan *plan) { int i, j; int *indices_n = malloc(plan->N_target * sizeof ( int)); double *values = malloc(plan->N_target * sizeof ( double)); int tmp_length_n = 0; int total_length_n = 0; for (i = 0; i < plan->N_target; i++) { indices_n[i] = 0; values[i] = 0; } for (i = 0; i < plan->N_target; i++) { bulid_indices_single_II(plan, plan->tree, i, indices_n, values, 0); for (j = 0; j < plan->N_target; j++) { if (indices_n[j] > 0) { total_length_n++; indices_n[j] = 0; } } } plan->total_length_n=total_length_n; plan->id_n = malloc(total_length_n * sizeof ( int)); plan->b_m = malloc(total_length_n * sizeof ( double)); for (i = 0; i < total_length_n; i++) { plan->id_n[i] = 0; plan->b_m[i] = 0; } total_length_n = 0; for (i = 0; i < plan->N_target; i++) { bulid_indices_single_II(plan, plan->tree, i, indices_n, values, 1); tmp_length_n = 0; for (j = 0; j < plan->N_target; j++) { if (indices_n[j] > 0) { plan->id_n[total_length_n] = j; plan->b_m[total_length_n] = values[j]; total_length_n++; tmp_length_n++; indices_n[j] = 0; values[j] = 0; } } plan->id_nn[i] = tmp_length_n; } free(indices_n); free(values); } inline double direct_compute_potential_leaf(fastsum_plan *plan, struct octree_node *tree, int index){ int i,k; double res=0; double dx,dy,dz,R; for (i = tree->begin; i < tree->end; i++) { dx = plan->x_t[3 * index]-plan->x_s[3 * i]; dy = plan->x_t[3 * index + 1]-plan->x_s[3 * i + 1]; dz = plan->x_t[3 * index + 2]-plan->x_s[3 * i + 2]; R = dx * dx + dy * dy + dz * dz; if (R>plan->r_eps_squre){ k = plan->x_s_ids[i]; dx *= plan->t_normal[3 * k]; dy *= plan->t_normal[3 * k + 1]; dz *= plan->t_normal[3 * k + 2]; res += plan->charge_density[k]*(dx + dy + dz) / (R*sqrt(R)); } } return res; } double compute_potential_single_target_II(fastsum_plan *plan, struct octree_node *tree, int index, double ***a) { double R,r; int i, j, k; double res = 0; double dx, dy, dz; R = pow2(plan->x_t[3 * index] - tree->x) + pow2(plan->x_t[3 * index + 1] - tree->y) + pow2(plan->x_t[3 * index + 2] - tree->z); r = sqrt(R); if (plan->mac * r > tree->radius) { if (plan->r_eps>r*(1-plan->mac)){ res=direct_compute_potential_leaf(plan,tree,index); return res; } if (!tree->have_moment) { tree->moment = alloc_3d_double(plan->p 
+ 1, plan->p + 1, plan->p + 1); tree->have_moment = 1; tree->need_upadte_moment = 1; } if (tree->need_upadte_moment) { compute_moment(plan, tree, tree->moment, tree->x, tree->y, tree->z); tree->need_upadte_moment = 0; } dx = plan->x_t[3 * index] - tree->x; dy = plan->x_t[3 * index + 1] - tree->y; dz = plan->x_t[3 * index + 2] - tree->z; compute_coefficient(a, dx, dy, dz, plan->p); for (i = 0; i < plan->p + 1; i++) { for (j = 0; j < plan->p - i + 1; j++) { for (k = 0; k < plan->p - i - j + 1; k++) { res += a[i][j][k] * tree->moment[i][j][k]; } } } return res; } else { if (tree->num_children > 0) { res = 0; for (i = 0; i < tree->num_children; i++) { res += compute_potential_single_target_II(plan, tree->children[i], index, a); } return res; } else { res=direct_compute_potential_leaf(plan,tree,index); return res; } } } void compute_analytical_potential(fastsum_plan *plan, double *phi, double *u1) { int i, j, k; int total_j = 0; for (i = 0; i < plan->N_target; i++) { for (j = 0; j < plan->id_nn[i]; j++) { k = plan->id_n[total_j]; phi[i] += plan->b_m[total_j] * u1[k]; total_j++; } phi[i]+=plan->vert_bsa[i] * u1[i]; } } void direct_sum_I(fastsum_plan *plan, double *phi, double *u1) { int i, j, k; double dx,dy,dz; double res,R; for (j = 0; j < plan->N_target; j++) { res=0; phi[j]=0; for(i=0;i<plan->N_source;i++){ dx = plan->x_t[3 * j]-plan->x_s[3 * i]; dy = plan->x_t[3 * j + 1]-plan->x_s[3 * i + 1]; dz = plan->x_t[3 * j + 2]-plan->x_s[3 * i + 2]; R = dx * dx + dy * dy + dz * dz; if (R>plan->r_eps_squre){ k = plan->x_s_ids[i]; dx *= plan->t_normal[3 * k]; dy *= plan->t_normal[3 * k + 1]; dz *= plan->t_normal[3 * k + 2]; res += plan->charge_density[k]*(dx + dy + dz) / (R*sqrt(R)); } } phi[j]=res; } compute_analytical_potential(plan,phi,u1); reset_moment(plan,plan->tree); } void fast_sum_II(fastsum_plan *plan, double *phi, double *u1) { int j; double ***a = alloc_3d_double(plan->p + 1, plan->p + 1, plan->p + 1); for (j = 0; j < plan->N_target; j++) { phi[j] = compute_potential_single_target_II(plan, plan->tree, j, a); } compute_analytical_potential(plan,phi,u1); reset_moment(plan,plan->tree); free_3d_double(a, plan->p + 1); }
7,463
21.618182
113
c
finmag
finmag-master/src/finmag/physics/native/derivatives.h
#pragma once namespace dolfin { namespace finmag { void dm_damping(double const& alpha, double const& gamma, double const& m_x, double const& m_y, double const& m_z, double const& mp_x, double const& mp_y, double const& mp_z, double const& H_x, double const& H_y, double const& H_z, double const& Hp_x, double const& Hp_y, double const& Hp_z, double& jtimes_x, double& jtimes_y, double& jtimes_z); void dm_precession(double const& alpha, double const& gamma, double const& m_x, double const& m_y, double const& m_z, double const& mp_x, double const& mp_y, double const& mp_z, double const& H_x, double const& H_y, double const& H_z, double const& Hp_x, double const& Hp_y, double const& Hp_z, double& jtimes_x, double& jtimes_y, double& jtimes_z); void dm_relaxation(double const& c, double const& m_x, double const& m_y, double const& m_z, double const& mp_x, double const& mp_y, double const& mp_z, double& jtimes_x, double& jtimes_y, double& jtimes_z); }}
1,293
55.26087
83
h
finmag
finmag-master/src/finmag/physics/native/equation.h
#include <memory> #include <vector> #include <dolfin/la/GenericVector.h> #include <dolfin/la/PETScVector.h> #include "terms.h" /* compile_extension_module needs code to be wrapped in the dolfin namespace */ namespace dolfin { namespace finmag { class Equation { public: Equation(GenericVector const& m, GenericVector const& H, GenericVector& dmdt); void solve(); void solve_with(PETScVector const& vec_m, PETScVector const& vec_H, PETScVector &vec_dmdt); void sundials_jtimes_serial(Array<double> const& mp, Array<double> const& Hp, Array<double>& jtimes); std::shared_ptr<GenericVector> get_pinned_nodes() const; void set_pinned_nodes(std::shared_ptr<GenericVector> const& value); std::shared_ptr<GenericVector> get_saturation_magnetisation() const; void set_saturation_magnetisation(std::shared_ptr<GenericVector> const& value); std::shared_ptr<GenericVector> get_current_density() const; void set_current_density(std::shared_ptr<GenericVector> const& value); std::shared_ptr<GenericVector> get_alpha() const; void set_alpha(std::shared_ptr<GenericVector> const& value); double get_gamma() const; void set_gamma(double value); double get_parallel_relaxation_rate() const; void set_parallel_relaxation_rate(double value); bool get_do_precession() const; void set_do_precession(bool value); void slonczewski(double d, double P, Array<double> const& p, double lambda, double epsilonprime); void slonczewski_disable(); bool slonczewski_status() const; void zhangli(double u_0, double beta); void zhangli_disable(); bool zhangli_status() const; private: GenericVector const& magnetisation; GenericVector const& effective_field; GenericVector& derivative; std::shared_ptr<GenericVector> pinned_nodes; std::shared_ptr<GenericVector> saturation_magnetisation; std::shared_ptr<GenericVector> current_density; std::shared_ptr<GenericVector> alpha; double gamma; double parallel_relaxation_rate; bool do_precession; bool do_slonczewski; std::unique_ptr<Slonczewski> stt_slonczewski; bool do_zhangli; std::unique_ptr<ZhangLi> stt_zhangli; /* temporary measure to see if we have disabled dolfin's * reordering of degrees of freedom. */ bool reorder_dofs_serial; }; }}
2,735
41.75
113
h
finmag
finmag-master/src/finmag/physics/native/terms.h
#pragma once #include <dolfin/function/Function.h> namespace dolfin { namespace finmag { void damping(double const& alpha, double const& gamma, double const& m_x, double const& m_y, double const& m_z, double const& H_x, double const& H_y, double const& H_z, double& dm_x, double& dm_y, double& dm_z); void precession(double const& alpha, double const& gamma, double const& m_x, double const& m_y, double const& m_z, double const& H_x, double const& H_y, double const& H_z, double& dm_x, double& dm_y, double& dm_z); void relaxation(double const& c, double const& m_x, double const& m_y, double const& m_z, double& dm_x, double& dm_y, double& dm_z); class Slonczewski { public: Slonczewski(double const d, double const P, Array<double> const& p, double const lambda, double const epsilonprime); void compute(double const& alpha, double const& gamma, double const& J, double const& Ms, double const& m_x, double const& m_y, double const& m_z, double& dm_x, double& dm_y, double& dm_z); private: double lambda; double P; /* degree of polarisation */ double d; /* thickness of free layer in SI units */ double p_x, p_y, p_z; /* fixed layer magnetisation direction */ double epsilonprime; /* secondary spin-transfer term */ }; class ZhangLi { public: ZhangLi(double const u_0, double const beta); void compute(double const& alpha, double const& Ms, double const& m_x, double const& m_y, double const& m_z, double const& g_x, double const& g_y, double const& g_z, double& dm_x, double& dm_y, double& dm_z); private: double u_0; double beta; }; }}
2,063
42.914894
81
h
null
pytorch-main/android/pytorch_android/src/main/cpp/pytorch_jni_common.h
#pragma once #include <c10/util/FunctionRef.h> #include <fbjni/fbjni.h> #include <torch/csrc/api/include/torch/types.h> #include "caffe2/serialize/read_adapter_interface.h" #include "cmake_macros.h" #ifdef __ANDROID__ #include <android/log.h> #define ALOGI(...) \ __android_log_print(ANDROID_LOG_INFO, "pytorch-jni", __VA_ARGS__) #define ALOGE(...) \ __android_log_print(ANDROID_LOG_ERROR, "pytorch-jni", __VA_ARGS__) #endif #if defined(TRACE_ENABLED) && defined(__ANDROID__) #include <android/trace.h> #include <dlfcn.h> #endif namespace pytorch_jni { constexpr static int kDeviceCPU = 1; constexpr static int kDeviceVulkan = 2; c10::DeviceType deviceJniCodeToDeviceType(jint deviceJniCode); class Trace { public: #if defined(TRACE_ENABLED) && defined(__ANDROID__) typedef void* (*fp_ATrace_beginSection)(const char* sectionName); typedef void* (*fp_ATrace_endSection)(void); static fp_ATrace_beginSection ATrace_beginSection; static fp_ATrace_endSection ATrace_endSection; #endif static void ensureInit() { if (!Trace::is_initialized_) { init(); Trace::is_initialized_ = true; } } static void beginSection(const char* name) { Trace::ensureInit(); #if defined(TRACE_ENABLED) && defined(__ANDROID__) ATrace_beginSection(name); #endif } static void endSection() { #if defined(TRACE_ENABLED) && defined(__ANDROID__) ATrace_endSection(); #endif } Trace(const char* name) { ensureInit(); beginSection(name); } ~Trace() { endSection(); } private: static void init(); static bool is_initialized_; }; class MemoryReadAdapter final : public caffe2::serialize::ReadAdapterInterface { public: explicit MemoryReadAdapter(const void* data, off_t size) : data_(data), size_(size){}; size_t size() const override { return size_; } size_t read(uint64_t pos, void* buf, size_t n, const char* what = "") const override { memcpy(buf, (int8_t*)(data_) + pos, n); return n; } ~MemoryReadAdapter() {} private: const void* data_; off_t size_; }; class JIValue : public facebook::jni::JavaClass<JIValue> { using DictCallback = c10::function_ref<facebook::jni::local_ref<JIValue>( c10::Dict<c10::IValue, c10::IValue>)>; public: constexpr static const char* kJavaDescriptor = "Lorg/pytorch/IValue;"; constexpr static int kTypeCodeNull = 1; constexpr static int kTypeCodeTensor = 2; constexpr static int kTypeCodeBool = 3; constexpr static int kTypeCodeLong = 4; constexpr static int kTypeCodeDouble = 5; constexpr static int kTypeCodeString = 6; constexpr static int kTypeCodeTuple = 7; constexpr static int kTypeCodeBoolList = 8; constexpr static int kTypeCodeLongList = 9; constexpr static int kTypeCodeDoubleList = 10; constexpr static int kTypeCodeTensorList = 11; constexpr static int kTypeCodeList = 12; constexpr static int kTypeCodeDictStringKey = 13; constexpr static int kTypeCodeDictLongKey = 14; static facebook::jni::local_ref<JIValue> newJIValueFromAtIValue( const at::IValue& ivalue, DictCallback stringDictCallback = newJIValueFromStringDict, DictCallback intDictCallback = newJIValueFromIntDict); static at::IValue JIValueToAtIValue( facebook::jni::alias_ref<JIValue> jivalue); private: static facebook::jni::local_ref<JIValue> newJIValueFromStringDict( c10::Dict<c10::IValue, c10::IValue>); static facebook::jni::local_ref<JIValue> newJIValueFromIntDict( c10::Dict<c10::IValue, c10::IValue>); }; void common_registerNatives(); } // namespace pytorch_jni
3,585
24.985507
80
h
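The Trace class above is an RAII guard: construction calls beginSection() and the destructor calls endSection(), which forward to ATrace only when TRACE_ENABLED is defined on Android and are no-ops elsewhere. A usage sketch (the function and section names are illustrative):

#include "pytorch_jni_common.h"

void runForward() {
    pytorch_jni::Trace trace("Module::forward");  /* beginSection("Module::forward") */
    /* ... run inference here; the trace section covers this whole scope ... */
}                                                 /* endSection() in ~Trace() */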
null
pytorch-main/aten/src/ATen/ATen.h
#pragma once #if !defined(_MSC_VER) && __cplusplus < 201703L #error C++17 or later compatible compiler is required to use ATen. #endif #include <ATen/Context.h> #include <ATen/Device.h> #include <ATen/DeviceGuard.h> #include <ATen/DimVector.h> #include <ATen/Dispatch.h> #include <ATen/Formatting.h> #include <ATen/Functions.h> #include <ATen/NamedTensor.h> #include <ATen/ScalarOps.h> #include <ATen/Tensor.h> #include <ATen/TensorGeometry.h> #include <ATen/TensorIndexing.h> #include <ATen/TensorOperators.h> #include <ATen/Version.h> #include <ATen/core/ATenGeneral.h> #include <ATen/core/Generator.h> #include <ATen/core/Reduction.h> #include <ATen/core/Scalar.h> #include <ATen/core/UnsafeFromTH.h> #include <ATen/core/ivalue.h> #include <ATen/core/jit_type.h> #include <c10/core/Allocator.h> #include <c10/core/InferenceMode.h> #include <c10/core/Layout.h> #include <c10/core/Storage.h> #include <c10/core/TensorOptions.h> #include <c10/util/Exception.h> // TODO: try to remove this // There is some back story, see https://github.com/pytorch/pytorch/issues/48684 #include <ATen/NativeFunctions.h>
1,107
28.157895
80
h
null
pytorch-main/aten/src/ATen/AccumulateType.h
#pragma once

#include <ATen/Config.h>
#include <c10/core/ScalarType.h>
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>

// Defines the accumulation type for a scalar type.
// Example:
//   using accscalar_t = acc_type<scalar_t, /*is_cuda*/true>;
//
// Accumulation types are an important concept in numeric computing
// because you frequently want to perform intermediate computations
// at a higher precision than the input and output precision, to avoid
// compounding internal rounding errors. Accumulation is the most
// well-known intermediate computation (it is of great importance for
// sum reduction and matrix multiply, for example), but in PyTorch
// acc_type ends up getting used for all sorts of other intermediate
// computations, so it perhaps would be more accurately (ahem) called an
// "accurate" type. acc_type is especially important for reduced
// precision operations like float16 and bfloat16, where relatively
// benign looking inputs can easily end up overflowing/underflowing.
//
// acc_type is parametrized by whether or not you are running on CUDA
// or not, because on CUDA double precision operations are expensive
// and so by default, we don't actually want to use double as an
// acc_type on CUDA. A lot of things are typed out below, but
// basically, the table is generated by a few rules:
//
//   If bool:
//       Use 'bool' as acc_type.
//   If floating point:
//       If CUDA, use 'float' as acc_type (unless scalar_t is double),
//       otherwise (CPU) use 'double'
//   If integral:
//       Use 'int64_t' as acc_type
//
// You're not forced to use this template; if you happen to know
// something specific about your use case, you can specify your own
// desired behavior. This template, however, will give you a reasonable
// default that will work for all dtypes supported in PyTorch.

#if defined(__CUDACC__)
#include <cuda.h>
#include <cuda_fp16.h>
#elif defined(__HIPCC__)
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#endif

namespace at {

template <typename T, bool is_cuda>
struct AccumulateType {};

#if defined(__CUDACC__) || defined(__HIPCC__)
template <>
struct AccumulateType<half, true> {
  using type = float;
};
#endif
template <>
struct AccumulateType<BFloat16, true> {
  using type = float;
};
template <>
struct AccumulateType<Half, true> {
  using type = float;
};
template <>
struct AccumulateType<float, true> {
  using type = float;
};
template <>
struct AccumulateType<double, true> {
  using type = double;
};
template <>
struct AccumulateType<int8_t, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<uint8_t, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<char, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<int16_t, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<int32_t, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<int64_t, true> {
  using type = int64_t;
};
template <>
struct AccumulateType<bool, true> {
  using type = bool;
};
template <>
struct AccumulateType<Half, false> {
  using type = float;
};
template <>
struct AccumulateType<BFloat16, false> {
  using type = float;
};
template <>
struct AccumulateType<c10::complex<Half>, false> {
  using type = c10::complex<float>;
};
template <>
struct AccumulateType<c10::complex<float>, false> {
  using type = c10::complex<double>;
};
template <>
struct AccumulateType<c10::complex<double>, false> {
  using type = c10::complex<double>;
};
template <>
struct AccumulateType<c10::complex<Half>, true> {
  using type = c10::complex<float>;
};
template <>
struct AccumulateType<c10::complex<float>, true> {
  using type = c10::complex<float>;
};
template <>
struct AccumulateType<c10::complex<double>, true> {
  using type = c10::complex<double>;
};
template <>
struct AccumulateType<float, false> {
  using type = double;
};
template <>
struct AccumulateType<double, false> {
  using type = double;
};
template <>
struct AccumulateType<int8_t, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<uint8_t, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<char, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<int16_t, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<int32_t, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<int64_t, false> {
  using type = int64_t;
};
template <>
struct AccumulateType<bool, false> {
  using type = bool;
};

template <typename T, bool is_cuda>
using acc_type = typename AccumulateType<T, is_cuda>::type;

TORCH_API c10::ScalarType toAccumulateType(c10::ScalarType type, bool is_cuda);

} // namespace at
4,697
25.1
79
h
null
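A quick compile-time illustration of the dispatch table above; a minimal sketch assuming a C++17 translation unit that can include the header:

#include <ATen/AccumulateType.h>
#include <cstdint>
#include <type_traits>

// Host-side (is_cuda = false): floats accumulate in double,
// reduced-precision types in float, integers in int64_t.
static_assert(std::is_same<at::acc_type<float, false>, double>::value, "");
static_assert(std::is_same<at::acc_type<at::Half, false>, float>::value, "");
static_assert(std::is_same<at::acc_type<int32_t, false>, int64_t>::value, "");

// Device-side (is_cuda = true): double is considered too expensive,
// so float stays float; double stays double.
static_assert(std::is_same<at::acc_type<float, true>, float>::value, "");
static_assert(std::is_same<at::acc_type<double, true>, double>::value, "");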
pytorch-main/aten/src/ATen/CPUApplyUtils.h
#pragma once

#include <ATen/CollapseDims.h>
#include <ATen/Parallel.h>
#include <ATen/TensorUtils.h>
#include <c10/util/irange.h>
#include <cstring>
#include <limits>
#include <utility>

namespace at {

/*
 * The basic strategy for apply is as follows:
 *
 * 1. Starting with the outermost index, loop until we reach a dimension where
 * the data is no longer contiguous, i.e. the stride at that dimension is not
 * equal to the size of the tensor defined by the outer dimensions. Let's call
 * this outer (contiguous) tensor A. Note that if the Tensor is contiguous,
 * then A is equal to the entire Tensor. Let's call the inner tensor B.
 *
 * 2. We loop through the indices in B, starting at its outermost dimension.
 * For example, if B is a 2x2 matrix, then we do:
 *
 * B[0][0]
 * B[0][1]
 * B[1][0]
 * B[1][1]
 *
 * We set the offset into the underlying storage as (storageOffset + stride_B *
 * index_B), i.e. basically we compute the offset into the storage as we would
 * normally for a Tensor. But because we are guaranteed the subsequent data is
 * contiguous in memory, we can simply loop for sizeof(A) iterations and
 * perform the operation, without having to follow the order described by the
 * strides of A.
 *
 * 3. As an optimization, we merge dimensions of A that are contiguous in
 * memory. For example, if A is a 3x3x3x3 tensor narrowed from a 3x3x4x3
 * tensor, then the first two dimensions can be merged for the purposes of
 * APPLY, reducing the number of nested loops.
 */

inline Tensor sort_strides(Tensor& tensor_) {
  IntArrayRef strides = tensor_.strides();
  std::vector<int64_t> indices;
  indices.reserve(tensor_.ndimension());
  for (const auto i : c10::irange(tensor_.ndimension())) {
    indices.push_back(i);
  }
  std::sort(indices.begin(), indices.end(), [&strides](int64_t i1, int64_t i2) {
    return strides[i1] > strides[i2];
  });
  Tensor tensor = tensor_.permute(indices);
  return tensor;
}

template <typename T, int N>
struct strided_tensor_iter_fixed {
 public:
  T* data_ = NULL;
  int64_t dim_ = 0;

  int64_t counter_[N] = {0};
  int64_t sizes_[N] = {0};
  int64_t strides_[N] = {0};

  strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
  void operator=(strided_tensor_iter_fixed const& x) = delete;
  strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
  strided_tensor_iter_fixed(Tensor& tensor, bool sort_strides = false)
      : data_(tensor.data_ptr<T>()) {
    (void)sort_strides; // Suppress unused variable warning
    std::memset(counter_, 0, sizeof(int64_t) * N);
    if (tensor.dim() > 0) {
      std::memcpy(
          sizes_, tensor.sizes().data(), tensor.dim() * sizeof(int64_t));
      std::memcpy(
          strides_, tensor.strides().data(), tensor.dim() * sizeof(int64_t));
    }
    dim_ = std::get<1>(collapse_dims(sizes_, strides_, tensor.ndimension()));
  }
};

template <typename T>
struct strided_tensor_iter {
 private:
 public:
  T* data_ = NULL;
  int64_t dim_;

  std::vector<int64_t> counter_;
  std::vector<int64_t> sizes_;
  std::vector<int64_t> strides_;

  strided_tensor_iter(strided_tensor_iter const&) = delete;
  void operator=(strided_tensor_iter const& x) = delete;
  strided_tensor_iter(strided_tensor_iter&&) = default;
  strided_tensor_iter(Tensor& tensor)
      : data_(tensor.data_ptr<T>()),
        dim_(tensor.ndimension()),
        counter_(dim_, 0),
        sizes_(tensor.sizes().vec()),
        strides_(tensor.strides().vec()) {
    dim_ = std::get<1>(collapse_dims(sizes_.data(), strides_.data(), dim_));
  }
};

inline bool _all_equal_numel(at::ArrayRef<Tensor> tensors) {
  if (tensors.empty())
    return true;
  int64_t all_numel = tensors[0].numel();
  for (const auto i : c10::irange(1, tensors.size())) {
    if (tensors[i].numel() != all_numel)
      return false;
  }
  return true;
}

inline std::string _all_equal_numel_error(at::ArrayRef<Tensor> tensors) {
  std::ostringstream oss;
  oss << "inconsistent tensor size, expected ";
  for (size_t i = 0; i < tensors.size() - 1; i++) {
    oss << tensors[i].sizes() << ", ";
  }
  oss << "and " << tensors[tensors.size() - 1].sizes()
      << " to have the same number of elements, but got ";
  for (size_t i = 0; i < tensors.size() - 1; i++) {
    oss << tensors[i].numel() << ", ";
  }
  oss << "and " << tensors[tensors.size() - 1].numel()
      << " elements respectively";
  return oss.str();
}

inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
  checkDeviceType("CPU_tensor_apply", tensors, kCPU);
  checkLayout("CPU_tensor_apply", tensors, kStrided);
  if (!_all_equal_numel(tensors))
    AT_ERROR(_all_equal_numel_error(tensors));
  // An empty tensor has no elements
  for (auto& t : tensors)
    if (t.numel() == 0)
      return false;
  return true;
}

inline int64_t _max_dim_tensors(ArrayRef<Tensor> tensors) {
  int64_t dim = 0;
  for (auto& t : tensors)
    dim = std::max(dim, t.ndimension());
  return dim;
}

inline void iterate(int64_t /*size*/){};

template <typename Arg, typename... Args>
inline void iterate(int64_t size, Arg& iter, Args&... iter_tail) {
  iter.counter_[iter.dim_ - 1] += size;
  iter.data_ = iter.data_ + size * iter.strides_[iter.dim_ - 1];
  iterate(size, iter_tail...);
}

inline bool iterate_continue() {
  return true;
};

template <typename Arg, typename... Args>
inline bool iterate_continue(Arg& iter, Args&... iter_tail) {
  return iter.counter_[iter.dim_ - 1] < iter.sizes_[iter.dim_ - 1] &&
      iterate_continue(iter_tail...);
}

inline int64_t max_iterate_size() {
  return std::numeric_limits<int64_t>::max();
};

template <typename Arg, typename... Args>
inline int64_t max_iterate_size(Arg& iter, Args&... iter_tail) {
  return std::min(
      (iter.sizes_[iter.dim_ - 1] - iter.counter_[iter.dim_ - 1]),
      max_iterate_size(iter_tail...));
}

inline void iterate_overflow(){};

template <typename Arg, typename... Args>
inline void iterate_overflow(Arg& iter, Args&... iter_tail) {
  if (iter.counter_[iter.dim_ - 1] == iter.sizes_[iter.dim_ - 1]) {
    for (int64_t i = iter.dim_ - 1; i > 0; i--) {
      if (iter.counter_[i] == iter.sizes_[i]) {
        iter.counter_[i] = 0;
        iter.counter_[i - 1]++;
        iter.data_ = iter.data_ - (iter.sizes_[i] * iter.strides_[i]) +
            iter.strides_[i - 1];
      }
    }
  }
  iterate_overflow(iter_tail...);
}

inline void forward(int64_t /*offset*/){};

template <typename Arg, typename... Args>
inline void forward(int64_t offset, Arg& iter, Args&... iter_tail) {
  int64_t multi = offset;
  for (int64_t i = iter.dim_ - 1; i >= 0; i--) {
    int64_t inc = multi % iter.sizes_[i];
    multi = multi / iter.sizes_[i];
    iter.data_ = iter.data_ + inc * iter.strides_[i];
    iter.counter_[i] += inc;
  }
  forward(offset, iter_tail...);
}

inline int64_t max_dim() {
  return 0;
}

template <typename Arg, typename... Args>
inline int64_t max_dim(Arg& iter, Args&... iter_tail) {
  return std::max(iter.dim_, max_dim(iter_tail...));
}

inline void apply_op(){};

template <typename Op, typename... Args>
inline void apply_op(
    int64_t numel,
    int64_t offset,
    const Op& op,
    Args... iters) {
  // For 0-dim tensors
  if (numel == 1 && max_dim(iters...) == 0) {
    op(*iters.data_...);
    return;
  }
  if (offset > 0)
    forward(offset, iters...);
  // Splitting this into chunks helps the compiler create faster assembly
  for (int64_t i = 0; i < numel;) {
    for (; iterate_continue(iters...) && i < numel;) {
      op(*iters.data_...);
      iterate(1, iters...);
      i++;
    }
    iterate_overflow(iters...);
  }
}

/*
  Apply a pointwise operator to sequence of tensors

  The calling convention for op is a function/functor that takes the same
  number of arguments as the number of given tensors; each argument is a
  reference to the corresponding scalar, since the iterators are dereferenced
  (`op(*iters.data_...)`) before the call. For example, to compute a = b * c,
  op would be of the form:
  [](scalar1& a_val, const scalar2& b_val, const scalar3& c_val) {
    a_val = b_val * c_val;
  };
*/

template <typename scalar1, typename scalar2, typename Op>
inline void CPU_tensor_apply2(Tensor tensor1, Tensor tensor2, const Op op) {
  if (!_apply_preamble({tensor1, tensor2}))
    return;
  if (_max_dim_tensors({tensor1, tensor2}) <= 8) {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
        strided_tensor_iter_fixed<scalar2, 8>(tensor2));
  } else {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter<scalar1>(tensor1),
        strided_tensor_iter<scalar2>(tensor2));
  }
}

template <typename scalar1, typename scalar2, typename scalar3, typename Op>
inline void CPU_tensor_apply3(
    Tensor tensor1,
    Tensor tensor2,
    Tensor tensor3,
    const Op op) {
  if (!_apply_preamble({tensor1, tensor2, tensor3}))
    return;
  if (_max_dim_tensors({tensor1, tensor2, tensor3}) <= 8) {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
        strided_tensor_iter_fixed<scalar3, 8>(tensor3));
  } else {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter<scalar1>(tensor1),
        strided_tensor_iter<scalar2>(tensor2),
        strided_tensor_iter<scalar3>(tensor3));
  }
}

template <
    typename scalar1,
    typename scalar2,
    typename scalar3,
    typename scalar4,
    typename Op>
inline void CPU_tensor_apply4(
    Tensor tensor1,
    Tensor tensor2,
    Tensor tensor3,
    Tensor tensor4,
    const Op op) {
  if (!_apply_preamble({tensor1, tensor2, tensor3, tensor4}))
    return;
  if (_max_dim_tensors({tensor1, tensor2, tensor3, tensor4}) <= 8) {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter_fixed<scalar1, 8>(tensor1),
        strided_tensor_iter_fixed<scalar2, 8>(tensor2),
        strided_tensor_iter_fixed<scalar3, 8>(tensor3),
        strided_tensor_iter_fixed<scalar4, 8>(tensor4));
  } else {
    apply_op(
        tensor1.numel(),
        0,
        op,
        strided_tensor_iter<scalar1>(tensor1),
        strided_tensor_iter<scalar2>(tensor2),
        strided_tensor_iter<scalar3>(tensor3),
        strided_tensor_iter<scalar4>(tensor4));
  }
}

} // namespace at
10,258
28.822674
80
h
null
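A hedged sketch of the apply utility in use: a pointwise in-place a += b over two equal-numel CPU float tensors. The helper name is an assumption; it simply follows the calling convention documented above (elements passed by reference).

#include <ATen/ATen.h>
#include <ATen/CPUApplyUtils.h>

void add_inplace(at::Tensor a, at::Tensor b) {
  // Both tensors must be CPU, strided, and have the same number of
  // elements; _apply_preamble enforces this.
  at::CPU_tensor_apply2<float, float>(
      a, b, [](float& a_val, const float& b_val) { a_val += b_val; });
}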
pytorch-main/aten/src/ATen/CPUFixedAllocator.h
#pragma once

#include <c10/core/Allocator.h>
#include <c10/util/Exception.h>

#include <functional>

// This file creates a fake allocator that just throws exceptions if
// it is actually used.

// state passed to the allocator is the std::function<void(void*)> called
// when the blob is released by ATen

namespace at {

static void* cpu_fixed_malloc(void*, ptrdiff_t) {
  AT_ERROR("attempting to resize a tensor view of an external blob");
}

static void* cpu_fixed_realloc(void*, void*, ptrdiff_t) {
  AT_ERROR("attempting to resize a tensor view of an external blob");
}

static void cpu_fixed_free(void* state, void* allocation) {
  auto on_release = static_cast<std::function<void(void*)>*>(state);
  (*on_release)(allocation);
  delete on_release;
}

static Allocator CPU_fixed_allocator =
    {cpu_fixed_malloc, cpu_fixed_realloc, cpu_fixed_free};

} // namespace at
845
23.882353
73
h
null
pytorch-main/aten/src/ATen/CPUGeneratorImpl.h
#pragma once

#include <ATen/core/Generator.h>
#include <ATen/core/MT19937RNGEngine.h>
#include <c10/core/GeneratorImpl.h>
#include <c10/util/Optional.h>

namespace at {

struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
  // Constructors
  CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
  ~CPUGeneratorImpl() override = default;

  // CPUGeneratorImpl methods
  std::shared_ptr<CPUGeneratorImpl> clone() const;
  void set_current_seed(uint64_t seed) override;
  void set_offset(uint64_t offset) override;
  uint64_t get_offset() const override;
  uint64_t current_seed() const override;
  uint64_t seed() override;
  void set_state(const c10::TensorImpl& new_state) override;
  c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
  static c10::DeviceType device_type();
  uint32_t random();
  uint64_t random64();
  c10::optional<float> next_float_normal_sample();
  c10::optional<double> next_double_normal_sample();
  void set_next_float_normal_sample(c10::optional<float> randn);
  void set_next_double_normal_sample(c10::optional<double> randn);
  at::mt19937 engine();
  void set_engine(at::mt19937 engine);

 private:
  CPUGeneratorImpl* clone_impl() const override;
  at::mt19937 engine_;
  c10::optional<float> next_float_normal_sample_;
  c10::optional<double> next_double_normal_sample_;
};

namespace detail {

TORCH_API const Generator& getDefaultCPUGenerator();
TORCH_API Generator
createCPUGenerator(uint64_t seed_val = default_rng_seed_val);

} // namespace detail
} // namespace at
1,538
29.78
66
h
null
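A minimal sketch of reseeding the default CPU generator through this API. The generator is shared global state, so the usual PyTorch convention (see Note [Acquire lock when using random generators], quoted in Context.h later in this dump) is to hold its mutex while mutating it:

#include <ATen/CPUGeneratorImpl.h>
#include <cstdint>
#include <mutex>

void reseed_default_cpu_generator(uint64_t seed) {
  auto gen = at::detail::getDefaultCPUGenerator();
  // See Note [Acquire lock when using random generators]
  std::lock_guard<std::mutex> lock(gen.mutex());
  gen.set_current_seed(seed);
}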
pytorch-main/aten/src/ATen/CachedTensorUtils.h
#pragma once

#include <ATen/ATen.h>

namespace at {
namespace caching {

// Some systems (just cudagraphs currently) will persist a static tensor output
// whose TensorImpl does not change across iterations. For these tensors
// caching dtype conversions is invalid. Additionally, there will be an extra
// reference count to these cached tensors that would prevent buffer inplacing
// and other checks on tensor uniqueness. If we are not using these systems the
// enabled flag will be false and we will avoid the hash lookup.

TORCH_API bool is_cached_tensor(const at::Tensor& t);
TORCH_API void add_cached_tensor(const at::Tensor& t);
TORCH_API void remove_cached_tensor(const at::Tensor& t);
TORCH_API void set_cached_tensors_enabled(bool enable);

// For gradient buffer stealing we will adjust the use count of tensors
// which are persisted by cudagraphs, just as we need to adjust reference
// count of tensors with hooks.
TORCH_API size_t adjusted_use_count(const at::Tensor& t);

} // namespace caching
} // namespace at
1,032
37.259259
80
h
null
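A hedged sketch of the intended use of `adjusted_use_count`: when deciding whether a buffer is uniquely owned (e.g. for gradient buffer stealing), consult the adjusted count so a cudagraph-held reference does not block the optimization. The helper below is hypothetical, not part of the source:

#include <ATen/CachedTensorUtils.h>

// Hypothetical helper: true if nothing but the caller (plus, possibly,
// a cudagraph cache entry that adjusted_use_count discounts) owns `t`.
bool tensor_is_uniquely_owned(const at::Tensor& t) {
  return at::caching::adjusted_use_count(t) == 1;
}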
pytorch-main/aten/src/ATen/CollapseDims.h
#include <c10/util/Exception.h>
#include <utility>

namespace at {

/*
[collapse dims] Updates sizes, and strides to reflect a "collapse" of
the info, possibly excluding the optional excludeDim. A "collapsed" version
of the info is the fewest dims that order the tensor's elements in the same
way as the original info. If excludeDim is specified, the collapse is the
fewest dims that order the tensor's elements as the original and preserve the
excluded dimension, unless the tensor collapses to a point.

This function returns a pair of values.

1) The (new) index of the preserved dimension if excludeDim is
specified. 0 if the tensor is collapsed to a point. -1
otherwise.

2) The new number of dimensions.
*/
template <typename T>
inline std::pair<int64_t, int64_t> collapse_dims(
    T* sizes,
    T* strides,
    int64_t dims,
    const int excludeDim = -1) {
  TORCH_CHECK(
      excludeDim >= -1 && excludeDim < dims,
      "expected excluded dim between -1 and dims - 1");

  int64_t stopDim = (excludeDim == -1) ? dims : excludeDim;
  int64_t newIndex = -1;
  int64_t oldIndex = 0;
  int64_t remappedExcludedDim = -1;

  while (oldIndex < dims) {
    // Finds a dimension to collapse into
    for (; oldIndex < stopDim; ++oldIndex) {
      if (sizes[oldIndex] == 1) {
        continue;
      }

      ++newIndex;
      sizes[newIndex] = sizes[oldIndex];
      strides[newIndex] = strides[oldIndex];
      ++oldIndex;
      break;
    }

    // Collapses dims
    for (; oldIndex < stopDim; ++oldIndex) {
      if (sizes[oldIndex] == 1) {
        continue;
      }

      if (strides[newIndex] == sizes[oldIndex] * strides[oldIndex]) {
        sizes[newIndex] *= sizes[oldIndex];
        strides[newIndex] = strides[oldIndex];
      } else {
        ++newIndex;
        sizes[newIndex] = sizes[oldIndex];
        strides[newIndex] = strides[oldIndex];
      }
    }

    // Handles excludeDim being set (oldIndex == excludeDim)
    if (oldIndex != dims) {
      // Preserves excluded dimension
      ++newIndex;
      sizes[newIndex] = sizes[oldIndex];
      strides[newIndex] = strides[oldIndex];
      remappedExcludedDim = newIndex;

      // Restarts iteration after excludeDim
      ++oldIndex;
      stopDim = dims;
    }
  }

  // Handles special case of all dims size 1
  if (newIndex == -1 || (newIndex == 0 && sizes[0] == 1)) {
    dims = 1;
    sizes[0] = 1;
    strides[0] = 1;

    return std::pair<int64_t, int64_t>(0, 1);
  }

  dims = newIndex + 1;
  return std::pair<int64_t, int64_t>(remappedExcludedDim, dims);
}

} // namespace at
2,560
25.957895
77
h
null
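A small worked example of the collapse, assuming a translation unit linked against libtorch/libc10 (TORCH_CHECK needs c10): a contiguous 2x3x4 row-major layout folds into a single dimension of 24 elements.

#include <ATen/CollapseDims.h>
#include <cassert>
#include <cstdint>

int main() {
  int64_t sizes[] = {2, 3, 4};
  int64_t strides[] = {12, 4, 1}; // contiguous row-major strides
  auto result = at::collapse_dims(sizes, strides, /*dims=*/3);
  // Fully contiguous, so everything collapses into one dimension of
  // 24 elements with stride 1; no dim was excluded, hence first == -1.
  assert(result.first == -1);
  assert(result.second == 1);
  assert(sizes[0] == 24 && strides[0] == 1);
  return 0;
}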
pytorch-main/aten/src/ATen/Context.h
#pragma once

#include <ATen/CPUGeneratorImpl.h>
#include <ATen/LinalgBackend.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/core/DeprecatedTypeProperties.h>
#include <ATen/core/Generator.h>
#include <ATen/core/LegacyTypeDispatch.h>
#include <ATen/detail/CUDAHooksInterface.h>
#include <ATen/detail/HIPHooksInterface.h>
#include <ATen/detail/MPSHooksInterface.h>
#include <ATen/detail/MTIAHooksInterface.h>
#include <ATen/detail/ORTHooksInterface.h>
#include <ATen/detail/XPUHooksInterface.h>
#include <c10/core/QEngine.h>
#include <c10/core/impl/DeviceGuardImplInterface.h>
#include <c10/util/CallOnce.h>
#include <c10/util/Exception.h>
#include <c10/util/env.h>
#include <c10/util/irange.h>

#include <cstdint>
#include <memory>
#include <mutex>

namespace at {

class Tensor;

enum class TORCH_API Float32MatmulPrecision { HIGHEST, HIGH, MEDIUM };

class TORCH_API Context {
 public:
  Context();

  const Generator& defaultGenerator(Device device) {
    c10::DeviceType device_type = device.type();
    initCUDAIfNeeded(device_type);
    initHIPIfNeeded(device_type);
    if (device_type == at::kCPU) {
      return at::detail::getDefaultCPUGenerator();
    } else if (device_type == at::kCUDA) {
      return at::detail::getCUDAHooks().getDefaultCUDAGenerator(
          device.index());
    } else if (device_type == at::kMPS) {
      return at::detail::getMPSHooks().getDefaultMPSGenerator();
    } else {
      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
    }
  }
  Device getDeviceFromPtr(void* data, c10::DeviceType device_type) {
    initCUDAIfNeeded(device_type);
    initHIPIfNeeded(device_type);
    if (device_type == at::kCPU) {
      return c10::DeviceType::CPU;
    } else if (device_type == at::kCUDA) {
      return at::detail::getCUDAHooks().getDeviceFromPtr(data);
    } else {
      AT_ERROR(c10::DeviceTypeName(device_type), " device type not enabled.");
    }
  }
  static bool isPinnedPtr(const void* data) {
    return detail::getCUDAHooks().isPinnedPtr(data);
  }
  static bool hasOpenMP();
  static bool hasMKL();
  static bool hasLAPACK();
  static bool hasMKLDNN();
  static bool hasMAGMA() {
    return detail::getCUDAHooks().hasMAGMA();
  }
  static bool hasCUDA() {
    return detail::getCUDAHooks().hasCUDA();
  }
  static bool hasMTIA() {
    return detail::getMTIAHooks().hasMTIA();
  }
  static bool hasCUDART() {
    return detail::getCUDAHooks().hasCUDART();
  }
  static long versionCUDART() {
    return detail::getCUDAHooks().versionCUDART();
  }
  static bool hasCuDNN() {
    return detail::getCUDAHooks().hasCuDNN();
  }
  static long versionCuDNN() {
    return detail::getCUDAHooks().versionCuDNN();
  }
  static bool hasCuSOLVER() {
    return detail::getCUDAHooks().hasCuSOLVER();
  }
  static bool hasHIP() {
    return detail::getHIPHooks().hasHIP();
  }
  static bool hasMPS() {
    return detail::getMPSHooks().hasMPS();
  }
  static bool hasIPU() {
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::IPU);
  }
  static bool hasXLA() {
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::XLA);
  }
  static bool hasXPU() {
    return detail::getXPUHooks().hasXPU();
  }
  static bool hasLazy() {
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::Lazy);
  }
  static bool hasORT() {
    return c10::impl::hasDeviceGuardImpl(c10::DeviceType::ORT);
  }
  // defined in header so that getNonVariableType has ability to inline
  // call_once check. getNonVariableType is called fairly frequently
  void lazyInitCUDA() {
    c10::call_once(thc_init, [&] { detail::getCUDAHooks().initCUDA(); });
  }
  void lazyInitHIP() {
    c10::call_once(thh_init, [&] { detail::getHIPHooks().initHIP(); });
  }
  static const at::cuda::NVRTC& getNVRTC() {
    return detail::getCUDAHooks().nvrtc();
  }

  static bool setFlushDenormal(bool on);

  // NB: This method is *purely* whether or not a user requested
  // that CuDNN was enabled, it doesn't actually say anything about
  // whether or not CuDNN is actually usable. Use cudnn_is_acceptable
  // to test this instead
  bool userEnabledCuDNN() const;
  void setUserEnabledCuDNN(bool e);
  bool userEnabledMkldnn() const;
  void setUserEnabledMkldnn(bool e);
  bool benchmarkCuDNN() const;
  void setBenchmarkCuDNN(bool);
  int benchmarkLimitCuDNN() const;
  void setBenchmarkLimitCuDNN(int);
  bool deterministicCuDNN() const;
  void setDeterministicCuDNN(bool);

  // Note [Disabling Fused SDP Kernels]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Flash and Memory Efficient SDP kernels are enabled by default.
  // However, they can be disabled by setting
  // at::globalContext().setUserEnabledFlashSDP(false) flag.
  // This is useful for debugging purposes. For example, if you want to
  // compare the performance of the flash SDP kernels with the unfused
  // kernel, you can disable the flash SDP kernels. By disabling
  // the math SDP kernel, you can force your code to use flash kernels.
  // The math SDP kernel can be disabled by setting
  // at::globalContext().setUserEnabledMathSDP(false) flag.
  void setSDPUseFlash(bool);
  bool userEnabledFlashSDP() const;

  void setSDPUseMemEfficient(bool);
  bool userEnabledMemEfficientSDP() const;

  void setSDPUseMath(bool);
  bool userEnabledMathSDP() const;

  at::LinalgBackend linalgPreferredBackend() const;
  void setLinalgPreferredBackend(at::LinalgBackend);

  // Note [Enabling Deterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Operations in PyTorch that normally act nondeterministically, but have an
  // alternate deterministic implementation, should satisfy the following
  // requirements:
  //
  // * Include this comment: "See Note [Enabling Deterministic Operations]"
  //
  // * Check the value of `at::globalContext().deterministicAlgorithms()` to
  //   toggle between nondeterministic and deterministic implementations.
  //
  // * Have an entry in the list of PyTorch operations that toggle between
  //   nondeterministic and deterministic implementations, in the docstring of
  //   `use_deterministic_algorithms()` in torch/__init__.py
  //
  // `example_func()` below shows an example of toggling between
  // nondeterministic and deterministic implementations:
  //
  //    void example_func() {
  //      // See Note [Enabling Deterministic Operations]
  //      if (at::globalContext().deterministicAlgorithms()) {
  //        example_func_deterministic();
  //      } else {
  //        example_func_nondeterministic();
  //      }
  //    }

  bool deterministicAlgorithms() const;
  bool deterministicAlgorithmsWarnOnly() const;
  void setDeterministicAlgorithms(bool, bool);

  // Note [Writing Nondeterministic Operations]
  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  // Operations in PyTorch that act nondeterministically and do not have an
  // alternate deterministic implementation should satisfy the following
  // requirements:
  //
  // * Include this comment: "See Note [Writing Nondeterministic Operations]"
  //
  // * Include a comment explaining why the operation is nondeterministic.
  //
  // * Throw an error when `Context::deterministicAlgorithms()` is true. Most
  //   of the time, this should be accomplished by calling
  //   `at::globalContext().alertNotDeterminstic()`. However, if the
  //   nondeterministic behavior is caused by the CuBLAS workspace
  //   configuration in CUDA >= 10.2,
  //   `at::globalContext().alertCuBLASConfigNotDeterministic()` should be
  //   called instead (in this case, a comment explaining why the operation is
  //   nondeterministic is not necessary). See below for details on these
  //   methods.
  //
  // * Have an entry in the list of nondeterministic PyTorch operations in the
  //   docstring of `use_deterministic_algorithms()` in torch/__init__.py
  //
  // * Have a test function in `test/test_torch.py` whose name begins with
  //   `test_nondeterministic_alert_`. Alternatively, if CuBLAS workspace
  //   configuration is the reason for nondeterminism, the operation should be
  //   included in the `test_cublas_config_nondeterministic_alert` test. Any
  //   new tests should ideally follow a pattern similar to the existing ones.
  //
  // `example_func()` below shows an example of the comments and error-throwing
  // code for a nondeterministic operation:
  //
  //    void example_func() {
  //      // See Note [Writing Nondeterministic Operations]
  //      // Nondeterministic because <reason>
  //      at::globalContext().alertNondeterministic("example_func");
  //      ...
  //    }

  // Throws an error if `Context::deterministicAlgorithms()` is true
  static void alertNotDeterministic(c10::string_view const& caller);

  // Throws an error if `Context::deterministicAlgorithms()` is true, CUDA
  // >= 10.2, and CUBLAS_WORKSPACE_CONFIG is not set to either ":16:8" or
  // ":4096:8". For more details:
  // https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility
  void alertCuBLASConfigNotDeterministic() const;

  void setFloat32MatmulPrecision(const std::string& s);
  bool allowTF32CuDNN() const;
  void setAllowTF32CuDNN(bool);
  bool allowTF32CuBLAS() const;
  void setAllowTF32CuBLAS(bool);
  Float32MatmulPrecision float32MatmulPrecision() const;
  void setFloat32MatmulPrecision(Float32MatmulPrecision p);
  bool allowFP16ReductionCuBLAS() const;
  void setAllowFP16ReductionCuBLAS(bool);
  bool allowBF16ReductionCuBLAS() const;
  void setAllowBF16ReductionCuBLAS(bool);
  at::QEngine qEngine() const;
  void setQEngine(at::QEngine e);
  static const std::vector<at::QEngine>& supportedQEngines();
  static bool isXNNPACKAvailable();
  void setCheckSparseTensorInvariants(bool e);
  bool checkSparseTensorInvariants() const;
  // This method is used to release the original weight after pre-packing.
  // It should be called once before loading/running the model.
  // NB: By default it is set to true for mobile builds.
  void setReleaseWeightsWhenPrepacking(bool e);
  bool releaseWeightsWhenPrepacking() const;

  void setDisplayVmapFallbackWarnings(bool enabled);
  bool areVmapFallbackWarningsEnabled() const;

  void setDefaultMobileCPUAllocator();
  void unsetDefaultMobileCPUAllocator();

 private:
  void initCUDAIfNeeded(c10::DeviceType p) {
    if (p == c10::DeviceType::CUDA) {
      lazyInitCUDA();
    }
  }
  void initHIPIfNeeded(c10::DeviceType p) {
    if (p == c10::DeviceType::HIP) {
      lazyInitHIP();
    }
  }
  static bool checkCuBLASConfigDeterministic();
  c10::once_flag thc_init;
  c10::once_flag thh_init;
  bool enabled_cudnn = true;
  bool deterministic_cudnn = false;
  bool _deterministic_algorithms = false;
  bool _deterministic_algorithms_warn_only = false;
  bool enabled_flashSDP = true;
  bool enabled_mem_efficientSDP = true;
  bool enabled_mathSDP = true;
#ifdef USE_ROCM
  bool benchmark_cudnn = true;
#else
  bool benchmark_cudnn = false;
#endif
  Float32MatmulPrecision float32_matmul_precision =
      c10::utils::check_env("TORCH_ALLOW_TF32_CUBLAS_OVERRIDE") == true
      ? at::Float32MatmulPrecision::HIGH
      : at::Float32MatmulPrecision::HIGHEST;
  int benchmark_limit_cudnn = 10;
  bool allow_tf32_cudnn = true;
  bool allow_fp16_reduction_cublas = true;
  bool allow_bf16_reduction_cublas = true;
  bool enabled_mkldnn = true;
  at::LinalgBackend linalg_preferred_backend = at::LinalgBackend::Default;
#ifdef C10_MOBILE
  bool release_original_weights = true;
#else
  bool release_original_weights = false;
#endif
  bool display_vmap_fallback_warnings_ = false;
  c10::optional<at::QEngine> quantized_engine = c10::nullopt;
  bool enable_sparse_tensor_invariant_checks = false;

  Allocator* prev_allocator_ptr_{nullptr};
};

TORCH_API Context& globalContext();

static inline void init() {
  globalContext();
}

TORCH_API Allocator* getCPUAllocator();

static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
    Backend p,
    ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      p, s);
}

static inline DeprecatedTypeProperties& CPU(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CPU, s);
}

static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::CUDA, s);
}

static inline DeprecatedTypeProperties& HIP(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::HIP, s);
}

static inline DeprecatedTypeProperties& MPS(ScalarType s) {
  return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
      Backend::MPS, s);
}

static inline bool hasCUDA() {
  return globalContext().hasCUDA();
}

static inline bool hasMTIA() {
  return globalContext().hasMTIA();
}

static inline bool hasHIP() {
  return globalContext().hasHIP();
}

static inline bool hasIPU() {
  return globalContext().hasIPU();
}

static inline bool hasXLA() {
  return globalContext().hasXLA();
}

static inline bool hasMPS() {
  return globalContext().hasMPS();
}

static inline bool hasORT() {
  return globalContext().hasORT();
}

static inline bool hasXPU() {
  return globalContext().hasXPU();
}

// Despite its name, this function returns the number of *CUDA* GPUs.
static inline size_t getNumGPUs() {
  // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
  // FUNCTION. If you are interested in interrogating the number of
  // devices for a specific device type, add that function to the
  // relevant library (e.g., similar to at::cuda::device_count())
  if (hasCUDA() && hasHIP()) {
    throw std::runtime_error(
        "Enabling both CUDA and HIP in ATen is not supported, as HIP masquerades "
        "to be CUDA (e.g., when you say CUDA, on a HIP build of ATen, this actually "
        "means HIP. Rebuild PyTorch with one or the other disabled.");
  } else if (hasCUDA()) {
    return detail::getCUDAHooks().getNumGPUs();
  } else if (hasHIP()) {
    return detail::getHIPHooks().getNumGPUs();
  } else {
    return 0;
  }
}

static inline bool hasOpenMP() {
  return globalContext().hasOpenMP();
}

static inline bool hasMKL() {
  return globalContext().hasMKL();
}

static inline bool hasLAPACK() {
  return globalContext().hasLAPACK();
}

static inline bool hasMAGMA() {
  return globalContext().hasMAGMA();
}

static inline bool hasMKLDNN() {
  return globalContext().hasMKLDNN();
}

static inline void manual_seed(uint64_t seed) {
  auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
  {
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(gen.mutex());
    gen.set_current_seed(seed);
  }
  // NB: Sometimes we build with CUDA, but we don't have any GPUs
  // available. In that case, we must not seed CUDA; it will fail!
  const auto num_gpus = detail::getCUDAHooks().getNumGPUs();
  if (hasCUDA() && num_gpus > 0) {
    for (const auto i : c10::irange(num_gpus)) {
      auto cuda_gen = globalContext().defaultGenerator(
          Device(at::kCUDA, static_cast<c10::DeviceIndex>(i)));
      {
        // See Note [Acquire lock when using random generators]
        std::lock_guard<std::mutex> lock(cuda_gen.mutex());
        cuda_gen.set_current_seed(seed);
      }
    }
  }

  if (hasMPS()) {
    auto mps_gen = globalContext().defaultGenerator(c10::DeviceType::MPS);
    // See Note [Acquire lock when using random generators]
    std::lock_guard<std::mutex> lock(mps_gen.mutex());
    mps_gen.set_current_seed(seed);
  }
}

// When the global flag `allow_tf32` is set to true, cuBLAS handles are
// automatically configured to use math mode CUBLAS_TF32_TENSOR_OP_MATH.
// For some operators, such as addmv, TF32 offers no performance improvement
// but causes precision loss. To help this case, this class implements
// a RAII guard that can be used to quickly disable TF32 within its scope.
//
// Usage:
//     NoTF32Guard disable_tf32;
struct TORCH_API NoTF32Guard {
  NoTF32Guard();
  ~NoTF32Guard();
  static bool should_disable_tf32();

 private:
  bool changed = false;
};

#ifdef USE_ROCM
struct TORCH_API ROCmBackwardPassGuard {
  ROCmBackwardPassGuard();
  ~ROCmBackwardPassGuard();
  static bool is_backward_pass();

 private:
  static thread_local bool is_backward_pass_;
};
#endif

} // namespace at
16,327
32.390593
85
h
null
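A hedged sketch of driving the global context from C++, using only the declarations above (assumes a program linked against libtorch):

#include <ATen/Context.h>

void configure_for_reproducibility() {
  // Seed every available generator (CPU, plus CUDA/MPS if present).
  at::manual_seed(42);
  // Ask ops with deterministic alternatives to use them; the second flag
  // selects "warn only" instead of erroring on nondeterministic ops.
  at::globalContext().setDeterministicAlgorithms(true, /*warn_only=*/false);
}

void precise_addmv_region() {
  // RAII: TF32 is disabled until the guard goes out of scope, matching
  // the NoTF32Guard usage note above.
  at::NoTF32Guard disable_tf32;
  // ... run precision-sensitive matmuls here ...
}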
pytorch-main/aten/src/ATen/DLConvertor.h
#pragma once

#include <ATen/ATen.h>
#include <ATen/Tensor.h>
#include <ATen/dlpack.h>

// this convertor will:
// 1) take a Tensor object and wrap it in the DLPack tensor
// 2) take a dlpack tensor and convert it to the ATen Tensor

namespace at {

TORCH_API ScalarType toScalarType(const DLDataType& dtype);
TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
TORCH_API Tensor fromDLPack(const DLManagedTensor* src);
TORCH_API Tensor fromDLPack(
    const DLManagedTensor* src,
    std::function<void(void*)> deleter);
TORCH_API DLDataType getDLDataType(const Tensor& t);
TORCH_API DLDevice getDLContext(const Tensor& tensor, const int64_t& device_id);

} // namespace at
669
29.454545
80
h
null
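A minimal sketch of the round trip described in the header comment, assuming a libtorch program:

#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

void dlpack_roundtrip() {
  at::Tensor t = at::rand({2, 3});
  // Wrap in a DLPack managed tensor; the DLManagedTensor keeps the
  // underlying storage alive.
  DLManagedTensor* dl = at::toDLPack(t);
  // Import it back; t2 aliases the same memory, no copy is made, and
  // ATen takes over responsibility for invoking the DLPack deleter.
  at::Tensor t2 = at::fromDLPack(dl);
}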
pytorch-main/aten/src/ATen/DeviceGuard.h
#pragma once

#include <ATen/core/IListRef.h>
#include <ATen/core/Tensor.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/ScalarType.h> // TensorList whyyyyy

namespace at {

// Are you here because you're wondering why DeviceGuard(tensor) no
// longer works? For code organization reasons, we have temporarily(?)
// removed this constructor from DeviceGuard. The new way to
// spell it is:
//
//    OptionalDeviceGuard guard(device_of(tensor));

/// Return the Device of a Tensor, if the Tensor is defined.
inline c10::optional<Device> device_of(const Tensor& t) {
  if (t.defined()) {
    return c10::make_optional(t.device());
  } else {
    return c10::nullopt;
  }
}

inline c10::optional<Device> device_of(const c10::optional<Tensor>& t) {
  return t.has_value() ? device_of(t.value()) : c10::nullopt;
}

/// Return the Device of a TensorList, if the list is non-empty and
/// the first Tensor is defined. (This function implicitly assumes
/// that all tensors in the list have the same device.)
inline c10::optional<Device> device_of(ITensorListRef t) {
  if (!t.empty()) {
    return device_of(t.front());
  } else {
    return c10::nullopt;
  }
}

} // namespace at
1,185
27.238095
72
h
null
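A short sketch of the guard idiom the comment above recommends:

#include <ATen/DeviceGuard.h>

void run_on_tensors_device(const at::Tensor& t) {
  // If t is undefined, device_of returns nullopt and the guard is a
  // no-op; otherwise the current device is switched for the guard's
  // lifetime and restored on scope exit.
  c10::OptionalDeviceGuard guard(at::device_of(t));
  // ... launch work on t's device here ...
}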
pytorch-main/aten/src/ATen/EmptyTensor.h
#pragma once
#include <ATen/core/TensorBase.h>

namespace at {
namespace detail {

template <class ArrayRefType>
inline void check_size_nonnegative(ArrayRefType size) {
  for (const auto& x : size) {
    TORCH_CHECK(
        x >= 0,
        "Trying to create tensor with negative dimension ",
        x,
        ": ",
        size);
  }
}

TORCH_API size_t computeStorageNbytesContiguous(
    IntArrayRef sizes,
    size_t itemsize,
    size_t storage_offset = 0);
TORCH_API SymInt computeStorageNbytesContiguous(
    SymIntArrayRef sizes,
    const SymInt& itemsize,
    const SymInt& storage_offset = 0);
TORCH_API size_t computeStorageNbytes(
    IntArrayRef sizes,
    IntArrayRef strides,
    size_t itemsize,
    size_t storage_offset = 0);
TORCH_API SymInt computeStorageNbytes(
    SymIntArrayRef sizes,
    SymIntArrayRef strides,
    const SymInt& itemsize,
    const SymInt& storage_offset = 0);

TORCH_API TensorBase empty_generic(
    IntArrayRef size,
    c10::Allocator* allocator,
    c10::DispatchKeySet ks,
    ScalarType scalar_type,
    c10::optional<c10::MemoryFormat> memory_format_opt);

TORCH_API TensorBase empty_strided_generic(
    IntArrayRef size,
    IntArrayRef stride,
    c10::Allocator* allocator,
    c10::DispatchKeySet ks,
    ScalarType scalar_type);

TORCH_API TensorBase empty_strided_symint_generic(
    SymIntArrayRef size,
    SymIntArrayRef stride,
    c10::Allocator* allocator,
    c10::DispatchKeySet ks,
    ScalarType scalar_type);

TORCH_API TensorBase empty_cpu(
    IntArrayRef size,
    ScalarType dtype,
    bool pin_memory = false,
    c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);

TORCH_API TensorBase empty_cpu(
    IntArrayRef size,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt,
    c10::optional<c10::MemoryFormat> memory_format_opt);

TORCH_API TensorBase empty_cpu(IntArrayRef size, const TensorOptions& options);

TORCH_API TensorBase empty_strided_cpu(
    IntArrayRef size,
    IntArrayRef stride,
    ScalarType dtype,
    bool pin_memory = false);

TORCH_API TensorBase empty_strided_cpu(
    IntArrayRef size,
    IntArrayRef stride,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt);

TORCH_API TensorBase empty_strided_cpu(
    IntArrayRef size,
    IntArrayRef stride,
    const TensorOptions& options);

TORCH_API TensorBase empty_meta(
    IntArrayRef size,
    ScalarType dtype,
    c10::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);

TORCH_API TensorBase empty_meta(
    IntArrayRef size,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt,
    c10::optional<c10::MemoryFormat> memory_format_opt);

TORCH_API TensorBase empty_symint_meta(
    SymIntArrayRef size,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt,
    c10::optional<c10::MemoryFormat> memory_format_opt);

TORCH_API TensorBase empty_meta(IntArrayRef size, const TensorOptions& options);

TORCH_API TensorBase
empty_strided_meta(IntArrayRef size, IntArrayRef stride, ScalarType dtype);

TORCH_API TensorBase empty_strided_meta(
    IntArrayRef size,
    IntArrayRef stride,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt);

TORCH_API TensorBase empty_strided_meta(
    IntArrayRef size,
    IntArrayRef stride,
    const TensorOptions& options);

TORCH_API TensorBase empty_strided_symint_meta(
    SymIntArrayRef size,
    SymIntArrayRef stride,
    ScalarType dtype);

TORCH_API TensorBase empty_strided_symint_meta(
    SymIntArrayRef size,
    SymIntArrayRef stride,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Layout> layout_opt,
    c10::optional<Device> device_opt,
    c10::optional<bool> pin_memory_opt);

TORCH_API TensorBase empty_strided_symint_meta(
    SymIntArrayRef size,
    SymIntArrayRef stride,
    const TensorOptions& options);

} // namespace detail
} // namespace at
4,324
27.267974
80
h
null
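A small worked example of the storage-size helpers above: for a contiguous 2x3 float tensor (strides {3, 1}) the required storage is 6 elements * 4 bytes = 24 bytes; the strided formula is itemsize * (storage_offset + 1 + sum_i (sizes[i] - 1) * strides[i]). A hedged sketch:

#include <ATen/EmptyTensor.h>
#include <cassert>
#include <cstdint>
#include <vector>

void storage_size_example() {
  const std::vector<int64_t> sizes = {2, 3};
  const std::vector<int64_t> strides = {3, 1};
  // (0 + 1 + 1*3 + 2*1) * 4 = 24 bytes.
  const size_t nbytes = at::detail::computeStorageNbytes(
      sizes, strides, /*itemsize=*/sizeof(float));
  assert(nbytes == 24);
  // The contiguous fast path agrees: 2 * 3 * 4 = 24 bytes.
  assert(
      at::detail::computeStorageNbytesContiguous(sizes, sizeof(float)) == 24);
}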
pytorch-main/aten/src/ATen/ExpandBase.h
#include <ATen/core/TensorBase.h>

// Broadcasting utilities for working with TensorBase
namespace at {
namespace internal {
TORCH_API TensorBase expand_slow_path(const TensorBase& self, IntArrayRef size);
} // namespace internal

inline c10::MaybeOwned<TensorBase> expand_size(
    const TensorBase& self,
    IntArrayRef size) {
  if (size.equals(self.sizes())) {
    return c10::MaybeOwned<TensorBase>::borrowed(self);
  }
  return c10::MaybeOwned<TensorBase>::owned(
      at::internal::expand_slow_path(self, size));
}
c10::MaybeOwned<TensorBase> expand_size(TensorBase&& self, IntArrayRef size) =
    delete;

inline c10::MaybeOwned<TensorBase> expand_inplace(
    const TensorBase& tensor,
    const TensorBase& to_expand) {
  return expand_size(to_expand, tensor.sizes());
}
c10::MaybeOwned<TensorBase> expand_inplace(
    const TensorBase& tensor,
    TensorBase&& to_expand) = delete;

} // namespace at
914
28.516129
80
h
null
pytorch-main/aten/src/ATen/ExpandUtils.h
#pragma once

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/view.h>
#include <ATen/ops/view_copy.h>
#endif

#include <ATen/Tensor.h>
#include <ATen/core/DimVector.h>
#include <c10/util/Exception.h>
#include <c10/util/MaybeOwned.h>
#include <c10/util/irange.h>

#include <functional>
#include <sstream>
#include <tuple>
#include <utility>

namespace at {

TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
TORCH_API std::vector<SymInt> infer_size_symint(
    SymIntArrayRef a,
    SymIntArrayRef b);
TORCH_API DimVector infer_size_dimvector(IntArrayRef a, IntArrayRef b);
TORCH_API SymDimVector
infer_size_symdimvector(SymIntArrayRef a, SymIntArrayRef b);

// Named type instead of a pair/tuple so that we can be sure to
// construct the vectors in place and get NRVO.
template <typename Container>
struct InferExpandGeometryResult {
  Container sizes;
  Container strides;
  explicit InferExpandGeometryResult(size_t ndim)
      : sizes(ndim), strides(ndim) {}
  explicit InferExpandGeometryResult(IntArrayRef sizes_, size_t ndim)
      : sizes(sizes_.begin(), sizes_.end()), strides(ndim) {}
};

TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
inferExpandGeometry(
    IntArrayRef tensor_sizes,
    IntArrayRef tensor_strides,
    IntArrayRef sizes);

TORCH_API InferExpandGeometryResult<DimVector> inferExpandGeometry_dimvector(
    IntArrayRef tensor_sizes,
    IntArrayRef tensor_strides,
    IntArrayRef sizes);

TORCH_API std::vector<int64_t> infer_dense_strides(
    IntArrayRef tensor_sizes,
    IntArrayRef tensor_strides);

// True if input shapes are expandable
// NOTE: infer_size did a similar check, please keep them sync if change is
// needed
inline bool are_expandable(IntArrayRef shape1, IntArrayRef shape2) {
  size_t ndim1 = shape1.size();
  size_t ndim2 = shape2.size();
  size_t ndim = ndim1 < ndim2 ? ndim1 : ndim2;

  for (int64_t i = ndim - 1; i >= 0; --i) {
    if (shape1[--ndim1] == shape2[--ndim2] || shape1[ndim1] == 1 ||
        shape2[ndim2] == 1) {
      continue;
    }
    return false;
  }
  return true;
}

// avoid copy-construction of Tensor by using a reference_wrapper.
inline void check_defined(
    std::initializer_list<std::reference_wrapper<const Tensor>> tensors,
    const char* api_name) {
  for (auto& t : tensors) {
    if (!t.get().defined()) {
      AT_ERROR(api_name, "(...) called with an undefined Tensor");
    }
  }
}

// NOTE [ ExpandUtils Borrowing ]
//
// Functions in ExpandUtils return `c10::MaybeOwned<Tensor>` because
// expansion may not actually be needed, in which case we can improve
// efficiency by returning
// `c10::MaybeOwned<Tensor>::borrowed(to_expand)`. However, this means
// that you need to be careful: the returned `c10::MaybeOwned<Tensor>`
// must not outlive the original `Tensor` object that `to_expand`
// referred to! The deleted rvalue reference overloads of these
// functions help with this by preventing trivial use of a temporary
// resulting from a function call, but it is still possible to make a
// mistake.

inline c10::MaybeOwned<Tensor> expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand) {
  if (tensor.sym_sizes().equals(to_expand.sym_sizes())) {
    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
  }
  return c10::MaybeOwned<Tensor>::owned(
      to_expand.expand_symint(tensor.sym_sizes()));
}

inline c10::MaybeOwned<Tensor> expand_inplace(
    const Tensor& tensor,
    Tensor&& to_expand) = delete;

inline c10::MaybeOwned<Tensor> expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand,
    const char* api_name) {
  check_defined({tensor, to_expand}, api_name);
  return expand_inplace(tensor, to_expand);
}

inline c10::MaybeOwned<Tensor> expand_inplace(
    const Tensor& tensor,
    Tensor&& to_expand,
    const char* api_name) = delete;

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand1,
    const Tensor& to_expand2) {
  if (tensor.sizes().equals(to_expand1.sizes()) &&
      tensor.sizes().equals((to_expand2.sizes()))) {
    return std::make_tuple(
        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
  }

  return std::make_tuple(
      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(tensor.sizes())),
      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(tensor.sizes())));
}

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    Tensor&& to_expand1,
    const Tensor& to_expand2) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand1,
    Tensor&& to_expand2) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(const Tensor& tensor, Tensor&& to_expand1, Tensor&& to_expand2) =
    delete;

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    const char* api_name) {
  check_defined({tensor, to_expand1, to_expand2}, api_name);
  return expand_inplace(tensor, to_expand1, to_expand2);
}

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    const char* api_name) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    const char* api_name) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_inplace(
    const Tensor& tensor,
    Tensor&& to_expand1,
    Tensor&& to_expand2,
    const char* api_name) = delete;

// See NOTE [ ExpandUtils Borrowing ] above for `MaybeOwned` explanation.
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(const Tensor& to_expand1, const Tensor& to_expand2) {
  if (to_expand1.sizes().equals(to_expand2.sizes())) {
    return std::make_tuple(
        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
        c10::MaybeOwned<Tensor>::borrowed(to_expand2));
  }

  auto expanded_size =
      infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes());
  return std::make_tuple(
      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(expanded_size)),
      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(expanded_size)));
}

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(Tensor&& to_expand1, const Tensor& to_expand2) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(const Tensor& to_expand1, Tensor&& to_expand2) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2) = delete;

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    const char* api_name) {
  check_defined({to_expand1, to_expand2}, api_name);
  return expand_outplace(to_expand1, to_expand2);
}

inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    const char* api_name) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    const char* api_name) = delete;
inline std::tuple<c10::MaybeOwned<Tensor>, c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    Tensor&& to_expand2,
    const char* api_name) = delete;

inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    const Tensor& to_expand3) {
  if (to_expand1.sizes().equals(to_expand2.sizes()) &&
      to_expand1.sizes().equals(to_expand3.sizes())) {
    return std::make_tuple(
        c10::MaybeOwned<Tensor>::borrowed(to_expand1),
        c10::MaybeOwned<Tensor>::borrowed(to_expand2),
        c10::MaybeOwned<Tensor>::borrowed(to_expand3));
  }

  auto expanded_size12 =
      infer_size_dimvector(to_expand1.sizes(), to_expand2.sizes());
  auto expanded_size =
      infer_size_dimvector(expanded_size12, to_expand3.sizes());
  return std::make_tuple(
      c10::MaybeOwned<Tensor>::owned(to_expand1.expand(expanded_size)),
      c10::MaybeOwned<Tensor>::owned(to_expand2.expand(expanded_size)),
      c10::MaybeOwned<Tensor>::owned(to_expand3.expand(expanded_size)));
}

inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    const Tensor& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    const Tensor& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    Tensor&& to_expand2,
    const Tensor& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    Tensor&& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    Tensor&& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    Tensor&& to_expand3) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(Tensor&& to_expand1, Tensor&& to_expand2, Tensor&& to_expand3) =
    delete;

inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    const Tensor& to_expand3,
    const char* api_name) {
  check_defined({to_expand1, to_expand2, to_expand3}, api_name);
  return expand_outplace(to_expand1, to_expand2, to_expand3);
}

inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    const Tensor& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    const Tensor& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    Tensor&& to_expand2,
    const Tensor& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    const Tensor& to_expand2,
    Tensor&& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    const Tensor& to_expand2,
    Tensor&& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    const Tensor& to_expand1,
    Tensor&& to_expand2,
    Tensor&& to_expand3,
    const char* api_name) = delete;
inline std::tuple<
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>,
    c10::MaybeOwned<Tensor>>
expand_outplace(
    Tensor&& to_expand1,
    Tensor&& to_expand2,
    Tensor&& to_expand3,
    const char* api_name) = delete;

inline c10::MaybeOwned<Tensor> expand_size(
    const Tensor& to_expand,
    IntArrayRef sizes) {
  if (to_expand.sizes().equals(sizes)) {
    return c10::MaybeOwned<Tensor>::borrowed(to_expand);
  }

  return c10::MaybeOwned<Tensor>::owned(to_expand.expand(sizes));
}

inline c10::MaybeOwned<Tensor> expand_size(
    Tensor&& to_expand,
    IntArrayRef sizes) = delete;

inline c10::MaybeOwned<Tensor> expand_size(
    const Tensor& to_expand,
    IntArrayRef sizes,
    const char* api_name) {
  check_defined({to_expand}, api_name);
  return expand_size(to_expand, sizes);
}

inline c10::MaybeOwned<Tensor> expand_size(
    Tensor&& to_expand,
    IntArrayRef sizes,
    const char* api_name) = delete;

inline std::vector<Tensor> expand_outplace(TensorList to_expand) {
  // expands a list of Tensors; ignores undefined (null) tensors
  bool first = true;
  DimVector sizes;
  for (const auto i : c10::irange(to_expand.size())) {
    if (!to_expand[i].defined()) {
      continue;
    } else if (first) {
      sizes = to_expand[i].sizes();
      first = false;
    } else {
      sizes = infer_size_dimvector(sizes, to_expand[i].sizes());
    }
  }

  std::vector<Tensor> result(to_expand.size());
  for (const auto i : c10::irange(to_expand.size())) {
    if (!to_expand[i].defined()) {
      continue;
    } else if (to_expand[i].sizes().equals(sizes)) {
      result[i] = to_expand[i];
    } else {
      result[i] = to_expand[i].expand(sizes);
    }
  }
  return result;
}

template <typename T>
inline Tensor _sum_to(
    Tensor tensor,
    const c10::ArrayRef<T> shape,
    bool always_return_non_view = false) {
  if (shape.size() == 0) {
    return tensor.sum();
  }

  auto sizes = at::symint::sizes<T>(tensor);
  c10::SmallVector<int64_t, 8> reduce_dims;
  const int64_t leading_dims = sizes.size() - shape.size();
  for (const auto i : c10::irange(leading_dims)) {
    reduce_dims.push_back(i);
  }
  for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
    if (shape[i - leading_dims] == 1 && sizes[i] != 1) {
      reduce_dims.push_back(i);
    }
  }

  if (!reduce_dims.empty()) {
    tensor = tensor.sum(reduce_dims, /*keepdim=*/true);
  }

  if (always_return_non_view) {
    // This is only actually used by the functionalization pass.
    // We want to be able to guarantee that this function doesn't return a view
    // of the input.
    return leading_dims > 0 ? at::symint::view_copy<T>(tensor, shape)
                            : tensor.clone();
  } else {
    return leading_dims > 0 ? at::symint::view<T>(tensor, shape) : tensor;
  }
}

inline Tensor sum_to(
    Tensor tensor,
    const c10::SymIntArrayRef shape,
    bool always_return_non_view = false) {
  return _sum_to(std::move(tensor), shape, always_return_non_view);
}

// Sums `tensor` repeatedly to produce a tensor of shape `shape`.
// Precondition: is_expandable_to(shape, tensor.sizes()) must be true
inline Tensor sum_to(
    Tensor tensor,
    const IntArrayRef shape,
    bool always_return_non_view = false) {
  return _sum_to(std::move(tensor), shape, always_return_non_view);
}

static inline bool is_expandable_to(
    SymIntArrayRef shape,
    c10::SymIntArrayRef desired) {
  size_t ndim = shape.size();
  size_t target_dim = desired.size();
  if (ndim > target_dim) {
    return false;
  }
  for (const auto i : c10::irange(ndim)) {
    const auto& size = shape[ndim - i - 1];
    const auto& target = desired[target_dim - i - 1];
    if (size != target && size != 1) {
      return false;
    }
  }
  return true;
}

static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
  auto sym_shape = c10::SymIntArrayRef(
      reinterpret_cast<const c10::SymInt*>(shape.data()), shape.size());
  auto sym_desired = c10::SymIntArrayRef(
      reinterpret_cast<const c10::SymInt*>(desired.data()), desired.size());
  return is_expandable_to(sym_shape, sym_desired);
}

} // namespace at
16,222
29.783681
80
h
null
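As a quick, hedged illustration of what expand_outplace and sum_to are for (a minimal sketch using only the public ATen API, not the internal helpers above): a binary op first broadcasts its inputs to a common shape, and backward passes later reduce gradients back to the original shape by summing away the broadcast dimensions.

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::ones({3, 1});        // will be broadcast against [2, 3, 4]
  at::Tensor a_big = a.expand({2, 3, 4}); // what expand_outplace produces
  // What sum_to does for a gradient of shape [2, 3, 4] flowing back to [3, 1]:
  // sum the leading dim away, keep the size-1 dim with keepdim, then view.
  at::Tensor g = a_big.sum({0, 2}, /*keepdim=*/true); // shape [1, 3, 1]
  g = g.view({3, 1});
  std::cout << g.sizes() << "\n"; // [3, 1]
}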
pytorch-main/aten/src/ATen/FuncTorchTLS.h
#pragma once #include <c10/macros/Macros.h> #include <memory> namespace at { namespace functorch { // NOTE [functorch TLS in pytorch/pytorch] // // functorch lives out-of-tree. However, it has some TLS that needs to be // propagated. The solution for that is we store a pointer to the TLS // inside pytorch/pytorch and extend FuncTorchTLSBase inside functorch to // include whatever functorch needs. // // We need to store a pointer due to the indirection: // inside functorch, we will create a subclass of FuncTorchTLSBase called // FuncTorchTLSImpl that actually contains metadata, like the DynamicLayerStack. // FuncTorchTLSBase doesn't have any metadata because it hasn't been defined // yet. // // Here in pytorch/pytorch, we will pass around FuncTorchTLSBase*, but inside // functorch, we will assign a FuncTorchTLSImpl* to the FuncTorchTLSBase*. // We can't directly pass around FuncTorchTLSBase (without a pointer) because // FuncTorchTLSImpl does not fit inside a FuncTorchTLSBase by virtue of having // more elements. struct TORCH_API FuncTorchTLSBase { virtual ~FuncTorchTLSBase() = default; virtual std::unique_ptr<FuncTorchTLSBase> deepcopy() const = 0; virtual int64_t checkSupportsSingleLevelAutogradFunction() const = 0; virtual void checkSupportsCppAutogradFunction() const = 0; virtual void checkSupportsInplaceRequiresGrad() const = 0; virtual void checkSupportsRetainGrad() const = 0; }; // returns a deep copy of the functorch TLS TORCH_API std::unique_ptr<FuncTorchTLSBase> getCopyOfFuncTorchTLS(); // sets the functorch TLS; always does a deep copy TORCH_API void setFuncTorchTLS( const std::shared_ptr<const FuncTorchTLSBase>& state); // get a mutable reference to the functorch TLS TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor(); } // namespace functorch } // namespace at
1,841
36.591837
80
h
null
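A minimal sketch of the pointer-to-base TLS pattern the note above describes, with illustrative names (TlsBase/TlsImpl are not the real functorch types):

#include <cstdint>
#include <memory>

// What core sees: an opaque base class with a virtual deepcopy().
struct TlsBase {
  virtual ~TlsBase() = default;
  virtual std::unique_ptr<TlsBase> deepcopy() const = 0;
};

// What the out-of-tree library defines: the subclass with real metadata.
struct TlsImpl final : TlsBase {
  int64_t dynamic_layer_depth = 0;
  std::unique_ptr<TlsBase> deepcopy() const override {
    return std::make_unique<TlsImpl>(*this);
  }
};

thread_local std::unique_ptr<TlsBase> tls_state; // core stores only a TlsBase*

int main() {
  tls_state = std::make_unique<TlsImpl>();
  auto snapshot = tls_state->deepcopy(); // getCopyOfFuncTorchTLS-style propagation
}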
pytorch-main/aten/src/ATen/FunctionalStorageImpl.h
#pragma once #include <ATen/Tensor.h> namespace at { namespace functionalization { // See Note [Functionalization Pass In Core] // ViewMeta is a class used by the functionalization pass to navigate between // a base tensor and a view tensor. // For example, if I call `b = a.view1(...)` // the functionalization pass will generate and store a ViewMeta on b that looks // like: // // ViewMeta( // [<captures>](const Tensor& base, int64_t mutated_view_idx) { // return base.view1(...); // }, // [<captures>](const at::Tensor& base, const at::Tensor& mutated_view, // int64_t mutated_view_idx) -> at::Tensor { // return at::functionalization::impl::view1_inverse(base, mutated_view, // ...); // } // // The forward_fn lambda describes how to replay view1 on a tensor. // // The reverse_fn lambda describes, given a tensor that is already a view, // how to get the corresponding base tensor. See Note [Functionalization Pass: // View Inverses] for details. struct ViewMeta { ViewMeta( std::function<Tensor(const Tensor&, int64_t)> forward, std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse, int64_t out_idx = 0) : forward_fn(std::move(forward)), reverse_fn(std::move(reverse)), out_index(out_idx) {} std::function<Tensor(const Tensor&, int64_t)> forward_fn; std::function<Tensor(const Tensor&, const Tensor&, int64_t)> reverse_fn; // See Note [out_idx in ViewMeta] int64_t out_index; // Returns a copy of the current ViewMeta, if out_idx matches the current // out_index. Otherwise, returns a new ViewMeta with the same forward/reverse // functions, but a new out index. ViewMeta to_out_idx(int64_t out_idx); }; // FunctionalStorageImpl is a subclass of StorageImpl used by the // functionalization pass. It has no underlying data (similar to meta storage). // It also knows how to reflect mutations to tensors in the absence of a valid // data pointer. // // A storage represents the state shared by (potentially multiple) views of the // same tensor. For example, in the following code: // // b = a.view1(...) // c = b.view2(...) // b.add_(1) // --> storage.add_update(b, {view1_meta}) // // The call to add_(1) will result in a call to alias.add_update(b, // {view1_meta}), queueing up the mutation from b onto the alias. Later, suppose // c is used in an expression (e.g. you try to print c, or pass it to an // operator). Doing so will involve "syncing" c. First we apply any pending // updates to the alias, and then we regenerate c by replaying its views off of // the updated alias. E.g: // // print(str(c)) // --> c.sync_() // --> alias.apply_updates() // after this, the alias will be updated to // reflect the mutation to b struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl { public: struct Update { const at::Tensor new_val; const std::vector<ViewMeta> view_metas; }; explicit FunctionalStorageImpl(const Tensor& value); void add_update( const Tensor& updated_val, const std::vector<ViewMeta>& view_metas); bool apply_updates(); const Tensor& base() { return base_; } size_t generation() const { return generation_; } void freeze() { frozen_ = true; } ~FunctionalStorageImpl() override = default; private: // NB: base_ should always point to a tensor BELOW the current // functionalization layer. This is mainly to avoid reference cycles. e.g. // given `b = a.view(...)` Both a.storage_ and b.storage_ are a // FunctionalStorageImpl containing an Alias, which contains a Tensor // `base_`. 
In this case (where a and b are FunctionalTensorWrapper's), base_ // should point not to a, but to a's unwrapped value, `a.value_`. See Note // [Functionalization: Alias Removal] for a diagram that shows this // visually. at::Tensor base_; std::vector<Update> updates_; // generation_ gets incremented every time a mutation is queued onto the // alias. It is used to determine if a given tensor is "up to date", or if it // needs to be regenerated from the alias. size_t generation_ = 0; // If frozen, no more mutations are allowed on this storage. Once frozen, a // storage cannot be unfrozen. bool frozen_ = false; }; } // namespace functionalization } // namespace at
4,303
34.278689
80
h
null
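A hedged toy model of the forward_fn/reverse_fn idea (ToyViewMeta is illustrative, not the real ViewMeta): mutate a "view", then push the mutation back onto the base through the inverse function, as add_update/apply_updates would.

#include <functional>
#include <iostream>
#include <vector>

struct ToyViewMeta {
  // forward: base -> view
  std::function<std::vector<int>(const std::vector<int>&)> forward_fn;
  // reverse: (base, mutated view) -> updated base
  std::function<std::vector<int>(std::vector<int>, const std::vector<int>&)> reverse_fn;
};

int main() {
  std::vector<int> base{1, 2, 3, 4};
  ToyViewMeta first_two{
      [](const std::vector<int>& b) {
        return std::vector<int>(b.begin(), b.begin() + 2);
      },
      [](std::vector<int> b, const std::vector<int>& v) {
        b[0] = v[0]; b[1] = v[1]; return b;
      }};
  auto view = first_two.forward_fn(base);
  view[0] = 100;                           // mutate the view
  base = first_two.reverse_fn(base, view); // "apply_updates": sync the base
  std::cout << base[0] << "\n";            // 100
}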
pytorch-main/aten/src/ATen/FunctionalTensorWrapper.h
#pragma once #include <ATen/ArrayRef.h> #include <ATen/FunctionalStorageImpl.h> #include <ATen/core/IListRef.h> #include <ATen/core/List.h> #include <ATen/core/boxing/BoxedKernel.h> #include <ATen/core/boxing/impl/boxing.h> #include <ATen/core/dispatch/Dispatcher.h> #include <c10/core/DispatchKey.h> namespace at { // Note [Functionalization Pass In Core] // The Functionalization pass is used to remove aliasing from a pytorch program. // // This is useful for backends that don't support aliasing, like XLA and Vulkan. // It's also necessary in order to remove mutation from a program, which is // needed in Functorch. // // Consider this program: // a = torch.ones(...) // b = a.view(...) // b.add_(1) // // In this program, b is meant to alias with a due to the use of view(). At the // end of the program, both a and b are full of 2's. However, backends that // don't support aliasing aren't able to correctly implement the view() // operator. Instead, they can opt into the Functionalization pass, which will // sit between the user and the backend, and provide the necessary aliasing // logic. // // The functionalization pass will turn the above program into a slightly // different program that has the same semantics, transparently to the user, // that backends like XLA/Vulkan are able to implement a = torch.ones(...) b = // a.view_copy(...) # view() replaced with view_copy(). Backends like // XLA/Vulkan can implement this! b.add_(1) a.add_(1) # Our functionalization // pass machinery knows that a and b are aliased - it applies b's mutation to a // too. // // So, how does the functionalization pass keep track of which tensors are // aliased? The pass works by wrapping EVERY tensor in the program inside of a // FunctionalTensorWrapper, which knows about its alias'd tensors. // // See Note [Functionalization: Alias Removal] for details on the aliasing // machinery. See Note [Functionalization: Mutation Removal] for details on // mutation removal. struct TORCH_API FunctionalTensorWrapper : public c10::TensorImpl { explicit FunctionalTensorWrapper(const Tensor& value); // Additional constructor to create a FunctionalTensorWrapper directly from an // underlying tensor that was created from a view. For example, the code b = // a.view1() will generate a constructor call to FunctionalTensorWrapper(b, a, // view1_meta) explicit FunctionalTensorWrapper( const Tensor& view_value, const FunctionalTensorWrapper* base, functionalization::ViewMeta meta); // Get the underlying, actual tensor, that doesn't know anything about // functionalization. const Tensor& value() const { return value_; }; // The concept of "level" is only ever important to functorch; it's exposed // here as more of a hook for functorch to use. int64_t level() const { return level_; }; void set_level(int64_t level) { level_ = level; } bool has_metadata_mutation() const { return has_metadata_mutation_; }; // Sync's the underlying tensor with its alias, if it's out of date. This // involves two steps: 1) Apply any pending updates/mutations to the alias 2) // Replay the views (if any) to regenerate the current tensor off of the // updated alias. void sync_(); // Performs step (1) of the sync. This is its own public API because it's // needed by view_inplace ops like transpose_. See Note [Functionalization // Pass - Inplace View Ops] void regenerate_from_base(); // Performs step (2) of the sync. This is its own public API because it's // needed by functorch. 
functorch wants to make sure that all input tensors to // a functionalized program have been properly synced so it can properly // propagate mutations to inputs. It can't just call sync_(), because the // FunctionalTensorWrapper will look like it has no aliases and sync_ will be // a noop. We use the reference count on storage_ to determine if the wrapper // is aliased, and by the time functorch is ready to propagate updates to // inputs, any intermediate views of the input created by the program will // have been deallocated. This function also returns whether or not the base // actually had any updates to apply. bool apply_updates(); // Takes the current state of value_ and snapshots it, sending it as a pending // update to the alias. void commit_update(); // When any tensor is mutated, the tensor increments its alias's "generation". // Separately, each tensor maintains its own "generation" counter, which is // used to determine if it's up-to-date with its alias. The act of syncing a // tensor will set a tensor's generation equal to its alias's generation. bool is_up_to_date() const; // Freezes the storage of this tensor, preventing subsequent mutations void freeze_storage() const; // Every FunctionalTensorWrapper contains a vector of ViewMeta objects // describing the series of view ops that ran to generate the current tensor // from the base tensor. This method is used by inplace-view ops like // transpose_. It appends a ViewMeta to the existing stack, and refreshes the // tensor by replaying the views off of the alias. void mutate_view_meta(at::functionalization::ViewMeta meta); // The functionalization pass can be used to remove mutations. // It does so by replacing any mutation op with its corresponding // out-of-place op, followed by a call to replace_(). e.g: // // a.add_(1) // // will turn into: // // tmp = a.add(1) // a.replace_(tmp) // // replace_() swaps out the wrapped tensor, value_, with tmp. void replace_(const Tensor& other); // See Note[resize_() in functionalization pass] void maybe_replace_storage(const Tensor& other); c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) const override; c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( c10::VariableVersion&& version_counter, bool allow_tensor_metadata_change) const override; ~FunctionalTensorWrapper() override = default; // FunctionalTensorWrapper overrides all custom size/stride functions, // so that if the inner tensor has a custom implementation // we make sure to call that implementation. at::IntArrayRef sizes_custom() const override; at::IntArrayRef strides_custom() const override; int64_t dim_custom() const override; int64_t numel_custom() const override; bool is_contiguous_custom(at::MemoryFormat memory_format) const override; c10::SymIntArrayRef sym_sizes_custom() const override; c10::SymInt sym_size_custom(int64_t d) const override; c10::SymIntArrayRef sym_strides_custom() const override; c10::SymInt sym_storage_offset_custom() const override; c10::Device device_custom() const override; private: const char* tensorimpl_type_name() const override; void set_constructor_metadata(); functionalization::FunctionalStorageImpl* functional_storage_impl() const; // This is used to re-implement shallow_copy_and_detach for // FunctionalTensorWrapper. The implementation is identical, but we just need // to return a subclass instead of a plain TensorImpl. 
// TODO: maybe it's possible to arrange for that to happen automatically // without an override here? template <typename VariableVersion> c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core( VariableVersion&& version_counter, bool allow_tensor_metadata_change) const; // Note that value is not taken by reference: internally, the wrapper will // change the value tensor that it points to over time. Tensor value_; int64_t level_; bool has_metadata_mutation_ = false; size_t generation_ = 0; std::vector<at::functionalization::ViewMeta> view_metas_; }; // Utility functions for the functionalization pass. namespace functionalization { namespace impl { TORCH_API inline FunctionalTensorWrapper* unsafeGetFunctionalWrapper( const Tensor& tensor) { auto functional_impl = static_cast<FunctionalTensorWrapper*>(tensor.unsafeGetTensorImpl()); TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_impl != nullptr); return functional_impl; } TORCH_API bool isFunctionalTensor(const at::Tensor& tensor); TORCH_API bool isFunctionalTensor(const c10::optional<Tensor>& t); TORCH_API bool isFunctionalTensor( const c10::List<c10::optional<Tensor>>& t_list); TORCH_API bool isFunctionalTensor(ITensorListRef list); TORCH_API Tensor to_functional_tensor(const Tensor& tensor); TORCH_API c10::optional<Tensor> to_functional_tensor( const c10::optional<Tensor>& tensor); TORCH_API c10::List<c10::optional<Tensor>> to_functional_tensor( const c10::List<c10::optional<Tensor>>& t_list); TORCH_API std::vector<Tensor> to_functional_tensor(ITensorListRef t_list); TORCH_API void freeze_functional_tensor(const Tensor& tensor); TORCH_API Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional = true); TORCH_API c10::optional<Tensor> from_functional_tensor( const c10::optional<Tensor>& t, bool assert_functional = true); TORCH_API c10::List<c10::optional<Tensor>> from_functional_tensor( const c10::List<c10::optional<Tensor>>& t_list); TORCH_API std::vector<Tensor> from_functional_tensor(ITensorListRef t_list); TORCH_API void sync(const at::Tensor& t); TORCH_API void sync(const c10::optional<Tensor>& t); TORCH_API void sync(const c10::List<c10::optional<Tensor>>& t_list); TORCH_API void sync(ITensorListRef t_list); TORCH_API void replace_(const Tensor& functional_tensor, const Tensor& other); TORCH_API void replace_( const ITensorListRef functional_tensor, ITensorListRef other); TORCH_API void commit_update(const Tensor& functional_tensor); TORCH_API void commit_update(ITensorListRef functional_tensor); // These two methods are XLA-specific logic and are no-ops // for the normal functionalization flow. 
TORCH_API void propagate_xla_data( const Tensor& functional_tensor, const Tensor& other); TORCH_API void propagate_xla_data( const ITensorListRef functional_tensor, ITensorListRef other); Tensor create_functional_tensor_with_view_meta( const Tensor& view_to_wrap, const Tensor& base, functionalization::ViewMeta meta, int64_t out_idx = 0); std::vector<Tensor> create_functional_tensor_with_view_meta( ITensorListRef view_to_wrap, const Tensor& base, functionalization::ViewMeta meta); void mutate_view_meta(const Tensor& self, functionalization::ViewMeta meta); void set_sizes_strides_offset(const Tensor& out, const Tensor& meta_out); void set_sizes_strides_offset( const std::vector<Tensor>& outs, const std::vector<Tensor>& meta_outs); // ~~~~~ TLS used in functionalization ~~~~~ TORCH_API bool getFunctionalizationReapplyViewsTLS(); TORCH_API void setFunctionalizationReapplyViewsTLS(bool reapply_views); class TORCH_API FunctionalizationReapplyViewsGuard { public: FunctionalizationReapplyViewsGuard(bool reapply_views) : prev_(getFunctionalizationReapplyViewsTLS()) { setFunctionalizationReapplyViewsTLS(reapply_views); } ~FunctionalizationReapplyViewsGuard() { setFunctionalizationReapplyViewsTLS(prev_); } FunctionalizationReapplyViewsGuard( const FunctionalizationReapplyViewsGuard&) = delete; FunctionalizationReapplyViewsGuard operator=( const FunctionalizationReapplyViewsGuard&) = delete; FunctionalizationReapplyViewsGuard(FunctionalizationReapplyViewsGuard&&) = delete; FunctionalizationReapplyViewsGuard operator=( FunctionalizationReapplyViewsGuard&&) = delete; private: bool prev_; }; } // namespace impl // Helper function to call an out-of-place composite aten kernel that may use // mutations / views internally, and functionalize them. TORCH_API void functionalize_op_helper( const c10::OperatorHandle& op, torch::jit::Stack* stack); template <class Op, bool symint, class ReturnType, class... ParameterTypes> struct _functionalize_aten_op final {}; template <class Op, bool symint, class ReturnType, class... ParameterTypes> struct _functionalize_aten_op<Op, symint, ReturnType(ParameterTypes...)> final { static ReturnType call( typename c10::maybe_keep_symint<symint, ParameterTypes>::type... args) { using FuncType = ReturnType( typename c10::maybe_keep_symint<symint, ParameterTypes>::type...); auto op = c10::Dispatcher::singleton() .findSchemaOrThrow( (const char*)Op::name, (const char*)Op::overload_name) .typed<FuncType>(); return c10::impl::BoxedKernelWrapper<FuncType>::call( c10::BoxedKernel::makeFromFunction<functionalize_op_helper>(), op, // BoxedKernelWrapper knows to ignore this keyset argument, // because functionalize_op_helper doesn't take in a DispatchKeySet c10::DispatchKeySet(), args...); } }; template <class Op> using functionalize_aten_op = _functionalize_aten_op<Op, false, typename Op::schema>; template <class Op> using functionalize_aten_op_symint = _functionalize_aten_op<Op, true, typename Op::schema>; } // namespace functionalization } // namespace at
13,196
39.234756
80
h
null
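A minimal sketch, in public ATen calls only, of the mutation-removal rewrite described above; the real pass does this via FunctionalTensorWrapper::replace_, which is not a public API.

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor a = at::ones({2, 2});
  // User program: a.add_(1);
  // Functionalized form: the out-of-place op, then swap in the result.
  at::Tensor tmp = a.add(1);
  a = tmp; // stands in for a.replace_(tmp)
  std::cout << a[0][0].item<float>() << "\n"; // 2
}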
pytorch-main/aten/src/ATen/InferSize.h
#pragma once #include <ATen/DimVector.h> #include <c10/core/ScalarType.h> #include <c10/core/SymIntArrayRef.h> #include <c10/util/DimVector.h> #include <c10/util/Optional.h> #include <sstream> #include <vector> namespace at { // Infers the size of a dim with size -1, if it exists. Also checks that new // shape is compatible with the number of elements. // // templated to handle std::vector<int64_t> and DimVector use cases, see // below // template <typename InputArrayRef, typename NumelType, typename ResultVec> inline void infer_size_impl( InputArrayRef shape, NumelType numel, ResultVec& res) { NumelType newsize = 1; // N.B. this is an index, not a sym dim! auto infer_dim = c10::optional<int64_t>(); for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) { if (shape[dim] == -1) { if (infer_dim) { throw std::runtime_error("only one dimension can be inferred"); } infer_dim = dim; } else if (shape[dim] >= 0) { newsize *= shape[dim]; } else { AT_ERROR("invalid shape dimension ", shape[dim]); } } if (numel == newsize || (infer_dim && newsize > 0 && numel % newsize == 0)) { if (infer_dim) { // We have a degree of freedom here to select the dimension size; follow // NumPy semantics and just bail. However, a nice error message is needed // because users often use `view` as a way to flatten & unflatten // dimensions and will otherwise be confused why // empty_tensor.view( 0, 0) // works yet // empty_tensor.view(-1, 0) // doesn't. TORCH_CHECK( newsize != 0, "cannot reshape tensor of 0 elements into shape ", shape, " because the unspecified dimension size -1 can be any " "value and is ambiguous"); res[*infer_dim] = numel / newsize; } return; } std::ostringstream ss; ss << "shape '" << shape << "' is invalid for input of size " << numel; throw std::runtime_error(ss.str()); } inline std::vector<int64_t> infer_size(IntArrayRef shape, int64_t numel) { auto res = shape.vec(); infer_size_impl(shape, numel, res); return res; } inline at::DimVector infer_size_dv(IntArrayRef shape, int64_t numel) { auto res = at::DimVector(shape); infer_size_impl(shape, numel, res); return res; } inline at::SymDimVector infer_size_dv( c10::SymIntArrayRef shape, c10::SymInt numel) { auto res = at::SymDimVector(shape); infer_size_impl<c10::SymIntArrayRef, c10::SymInt, at::SymDimVector>( shape, std::move(numel), res); return res; } } // namespace at
2,613
28.704545
80
h
null
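A small usage sketch of the -1 inference rule that infer_size_impl implements, seen through the public view() API (assuming libtorch is available):

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor t = at::arange(12);  // 12 elements
  at::Tensor r = t.view({3, -1}); // the -1 is inferred: 12 / 3 = 4
  std::cout << r.sizes() << "\n"; // [3, 4]
  // t.view({5, -1}) would throw: 12 is not divisible by 5.
  // t.view({-1, -1}) would throw: only one dimension can be inferred.
}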
pytorch-main/aten/src/ATen/LegacyBatchedFallback.h
#pragma once #include <ATen/ATen.h> #include <ATen/core/op_registration/op_registration.h> #include <torch/library.h> namespace at { // If an operator doesn't have a batching rule implemented then we fallback // to this implementation. The fallback only works on out-of-place operators // that return only tensors with new memory. (e.g., no in-place operators, no // view operations). // // The fallback effectively takes all of the BatchedTensors in `stack`, slices // them, and runs `op` on all of the corresponding slices to produce slices // of the outputs. The output slices then get `torch.stack`ed to create the // final returns. // // The performance of the fallback is not very good because it introduces an // extra copy from stacking the sliced outputs. Because of this, we prefer to // write batching rules for operators whenever possible. void batchedTensorForLoopFallback( const c10::OperatorHandle& op, torch::jit::Stack* stack); } // namespace at
974
36.5
78
h
null
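A hedged sketch of the slice-per-example strategy the fallback uses, written with public ATen calls (the real fallback is a boxed kernel, not this loop):

#include <ATen/ATen.h>
#include <iostream>
#include <vector>

int main() {
  at::Tensor batched = at::randn({4, 3}); // batch dim 0, logical shape [3]
  std::vector<at::Tensor> outs;
  for (const auto& example : batched.unbind(0)) {
    outs.push_back(example.sin()); // run the op on each logical example
  }
  // Stacking introduces the extra copy the comment above warns about.
  at::Tensor result = at::stack(outs);
  std::cout << result.sizes() << "\n"; // [4, 3]
}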
pytorch-main/aten/src/ATen/LegacyBatchedTensorImpl.h
#pragma once #include <bitset> #include <utility> #include <ATen/ArrayRef.h> #include <ATen/SmallVector.h> #include <ATen/Tensor.h> namespace at { // We assume this in a few other places in the codebase, // but there isn't a centralized definition. constexpr int64_t kVmapMaxTensorDims = 64; // The valid vmap levels range from [0, 64). This effectively means that we // support a maximum of 64 nested vmaps. constexpr int64_t kVmapNumLevels = 64; // Store this number of elements of BatchDims on the stack. Most people will // probably use <= 5 nested vmaps, but adjust this number as necessary. constexpr int64_t kBatchDimsStackSize = 5; // a BatchDim represents a "private" dimension on a Tensor created inside of // vmap. It is a (level, dim) tuple, with the `dim` indicating which dimension // is being vmap'ed over and the `level` being an identifier for which vmap // said dimension was created inside. The `dim` corresponds to a "physical // dim" - it is a dimension index on the underlying physical tensor that is // being vmapped over. struct BatchDim { BatchDim(int64_t level, int64_t dim) : dim_(dim), level_(level) {} int64_t dim() const { return dim_; } int64_t level() const { return level_; } private: int64_t dim_; int64_t level_; }; using BatchDims = SmallVector<BatchDim, kBatchDimsStackSize>; using BatchDimsRef = ArrayRef<BatchDim>; // A BatchedTensorImpl holds an underlying Tensor and a list of BatchDim // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a // BatchedTensorImpl. // // The batch dimensions are treated as being "private"; they are not // user-visible. For example, in the following Tensor, // bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, dim=1)]) // dimensions 0 and 1 are batch dimensions. // // bt.sizes() returns (5, 7); bt.sum(0) performs a reduction over the (public) // dim 0, which is equivalent to dim 3 in the underlying ones(2, 3, 5, 7) // tensor. struct TORCH_API BatchedTensorImpl : public c10::TensorImpl { explicit BatchedTensorImpl(Tensor value, BatchDims bdims); // Returns a reference to BatchDims that represent which dimensions of this // tensor are private. BatchDimsRef bdims() const { return bdims_; } // BatchedTensorImpl wraps a Tensor const Tensor& value() const { return value_; }; // Given a public dimension index, return the dimension index in the // underlying value() tensor. For example, if we have // bt = BatchedTensorImpl(ones(2, 3, 5, 7), [(lvl=1, dim=0), (lvl=2, // dim=2)]) // bt.actualDim(0) -> 1 // bt.actualDim(1) -> 3 // bt.actualDim(2) -> Error int64_t actualDim(int64_t dim, bool wrap_dim = true) const; // We have to override this because we opted into CustomStrides IntArrayRef strides_custom() const override; // Override a bunch of methods inherited from TensorImpl to return error // messages. bool is_contiguous_custom(at::MemoryFormat memory_format) const override; void set_size(int64_t dim, int64_t new_size) override; void set_stride(int64_t dim, int64_t new_stride) override; void set_storage_offset(int64_t storage_offset) override; #ifdef DEBUG bool has_storage() const override; #endif private: // see NOTE: [BatchedTensorImpl levels invariant] void checkInvariants() const; const char* tensorimpl_type_name() const override; Tensor value_; // Note: [BatchedTensorImpl levels invariant] // There is an invariant that the BatchDims must be stored in increasing // `level` order. That is, for i < j, bdims_[i].level must be less than // bdims_[j].level. 
BatchDims bdims_; }; // NB: We use the term "BatchedTensor" to mean a Tensor that is backed with a // BatchedTensorImpl. inline bool isBatchedTensor(const Tensor& tensor) { return tensor.unsafeGetTensorImpl()->key_set().has(DispatchKey::Batched); } // It is unsafe to call this on a Tensor that is not backed by a // BatchedTensorImpl. Please use `maybeGetBatchedImpl` whenever possible. inline BatchedTensorImpl* unsafeGetBatchedImpl(Tensor tensor) { return static_cast<BatchedTensorImpl*>(tensor.unsafeGetTensorImpl()); } inline BatchedTensorImpl* maybeGetBatchedImpl(Tensor tensor) { if (!isBatchedTensor(tensor)) { return nullptr; } return unsafeGetBatchedImpl(std::move(tensor)); } // Returns a bitset. If bit i is set, then that means dim i is a batchdim. inline std::bitset<kVmapMaxTensorDims> createBatchDimBitset( BatchDimsRef bdims) { std::bitset<kVmapMaxTensorDims> is_bdim; for (const auto& bdim : bdims) { is_bdim.set(bdim.dim()); } return is_bdim; } // Creates a bitset for all of the levels present in `bdims` inline std::bitset<kVmapNumLevels> createVmapLevelsBitset(BatchDimsRef bdims) { std::bitset<kVmapNumLevels> result; for (const auto& bdim : bdims) { result.set(bdim.level()); } return result; } inline std::ostream& operator<<(std::ostream& out, const BatchDim& bdim) { out << "(lvl=" << bdim.level() << ", dim=" << bdim.dim() << ")"; return out; } // Use this to construct a BatchedTensor from a regular Tensor TORCH_API Tensor makeBatched(const Tensor& tensor, BatchDims bdims); // Adds a batch dim to `tensor`, returning a BatchedTensor TORCH_API Tensor addBatchDim(const Tensor& tensor, int64_t level, int64_t dim); // Checks if an inplace operation on self and other is "vmap compatible". // See NOTE: [vmap-incompatible in-place operations] for the definition of this. TORCH_API bool inplaceIsVmapCompatible(const Tensor& self, const Tensor& other); } // namespace at
5,575
33.419753
80
h
null
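A minimal standalone sketch of the actualDim() bookkeeping (actual_dim here is illustrative, not the real method): a public (logical) dim index maps to a physical index by skipping over the private batch dims.

#include <cstdint>
#include <iostream>
#include <vector>

int64_t actual_dim(int64_t logical_dim,
                   const std::vector<int64_t>& batch_dims,
                   int64_t physical_ndim) {
  int64_t logical_seen = -1;
  for (int64_t phys = 0; phys < physical_ndim; ++phys) {
    bool is_batch = false;
    for (int64_t b : batch_dims) {
      if (b == phys) is_batch = true;
    }
    if (!is_batch && ++logical_seen == logical_dim) return phys;
  }
  return -1; // logical_dim out of bounds
}

int main() {
  // rank-4 physical tensor with batch dims {0, 2}, as in the header's example
  std::cout << actual_dim(0, {0, 2}, 4) << "\n"; // 1
  std::cout << actual_dim(1, {0, 2}, 4) << "\n"; // 3
}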
pytorch-main/aten/src/ATen/LegacyVmapMode.h
#pragma once #include <c10/core/impl/LocalDispatchKeySet.h> namespace at { namespace impl { // VmapMode contains a thread local count of how many nested vmaps // we are currently inside. That number is known as the `vmap level`. // VmapMode is used in the implementation of the Python `torch.vmap` API. // // NOTE: this is NOT the c++ api for torch.vmap. That doesn't exist yet. struct TORCH_API VmapMode { // Returns the vmap level, aka the count of how many nested vmaps we're in. static int64_t current_vmap_level(); // Increment the count of nested vmaps. If this causes the vmap level to be // greater than 0, then it enables DispatchKey::VmapMode on all tensors. static int64_t increment_nesting(); // Decrements the count of nested vmaps. If this causes the vmap level to be // equal to 0, then it disables DispatchKey::VmapMode on all tensors. static int64_t decrement_nesting(); }; } // namespace impl } // namespace at
952
31.862069
78
h
null
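A hedged sketch of the nesting counter this mode maintains, phrased as an RAII guard (the real implementation also toggles DispatchKey::VmapMode, which is omitted here):

#include <cstdint>
#include <iostream>

thread_local int64_t vmap_level = 0;

struct VmapNestGuard {
  VmapNestGuard() { ++vmap_level; }  // increment_nesting()
  ~VmapNestGuard() { --vmap_level; } // decrement_nesting()
};

int main() {
  VmapNestGuard outer;
  {
    VmapNestGuard inner;
    std::cout << vmap_level << "\n"; // 2
  }
  std::cout << vmap_level << "\n";   // 1
}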
pytorch-main/aten/src/ATen/LegacyVmapTransforms.h
#pragma once #include <ATen/LegacyBatchedTensorImpl.h> #include <ATen/core/IListRef.h> namespace at { // This file contains abstractions used for transforming *logical* vmap // arguments into *physical* arguments. (Keep reading for definitions of these // terms). // NOTE: [Logical vs physical args] // Consider the following vmap. // vmap(vmap(func, in_dims=(2,)), in_dims=(0,))(torch.ones(2, 3, 4)) // This would produce a BatchedTensor wrapping a Tensor of size [2, 3, 4], // with batch dims 0 and 2: // BatchedTensor(ones(2, 3, 4), bdims=[(lvl=1,dim=0),(lvl=2,dim=2)]) // // We say the *logical* view of the tensor has size [3] -- tensors inside // `func` appear to have size [3]. // However, the *physical* underlying tensor (the one passed to vmap) has size // [2, 3, 4]. // // This notion of logical vs physical also extends to non-tensor arguments. // Consider the previous tensor; let's assume the user called // `torch.sum(tensor, dim=0)` inside of `func`. Then the logical // dimension they are reducing over is dim 0 but the physical dim is dim 1 // (the first non-batch dimension) // Forward declared; see NOTE: [What is a VmapPhysicalView?] struct VmapPhysicalView; // Most PyTorch operators take 4 or fewer inputs. constexpr int64_t kVmapTransformStaticInputSize = 4; using VmapPhysicalViewVec = SmallVector<VmapPhysicalView, kVmapTransformStaticInputSize>; // Pytorch generally advertises good performance for <= 5 dims. // (see ATen/core/DimVector.h). We add a few extra dims (~3) for vmap // dimensions to get 8. Adjust this number as necessary constexpr int64_t kVmapStaticDimVecSize = 8; using VmapDimVector = SmallVector<int64_t, kVmapStaticDimVecSize>; using VmapSymDimVector = SmallVector<c10::SymInt, kVmapStaticDimVecSize>; // NOTE: [What is an VmapTransform?] // An *VmapTransform* converts logical views of tensors to physical views. // // Batching rules use VmapTransforms to convert logical arguments to // physical arguments, then call one or more at:: operator that handles the // physical arguments, and then converts the physical result back to a logical // argument. // VmapTransform for operators that take tensors with multiple batch dims. // Given one or more logical views on Tensors, `logicalToPhysical` // permutes all of the batch dims to the front of the tensor, aligns // and expands the batch dims to match each other (according to their `level`), // and returns a VmapPhysicalView on the tensor(s). struct TORCH_API MultiBatchVmapTransform { static VmapPhysicalView logicalToPhysical(const Tensor& logical_tensor); static VmapPhysicalViewVec logicalToPhysical(ITensorListRef logical_tensors); }; // VmapTransform for operators that broadcast all inputs. // Given some logical views on Tensors, `logicalToPhysical`: // - permutes all of the batch dims to the front of the tensors // - aligns all the batch dims to the collective levels of all of the tensors. // If a tensor does not have a batch dim for a vmap level, then it receives // a size-one dimension for said level. // - aligns the non-batch dims to have the same dimensionality, adding extra // size-1 dimensions in between the batch dimensions and the non-batch // dimensions so that the batch dimensions are lined up from the right. // // For example: given inputs of size (B, 2) and (B, 3, 2) where B is the batch // dimension, BroadcastingVmapTransform returns VmapPhysicalViews that wrap // tensors of size (B, 1, 2) and (B, 3, 2). 
// // Given inputs of size (B, 2) and (2,), BroadcastingVmapTransform returns // VmapPhysicalViews wrapping tensors of size (B, 2) and (1, 2). We don't // actually *need* to return a tensor of size (1, 2) for the second tensor // because the broadcasting operation takes care of that for us, but we do // it anyways to keep things simple. struct TORCH_API BroadcastingVmapTransform { static VmapPhysicalViewVec logicalToPhysical(TensorList logical_tensors); }; // Forward declared, if you're reading this file head to toe, don't worry about // it yet. struct VmapPhysicalToLogicalMap; // NOTE: [What is a VmapPhysicalView?] // VmapPhysicalView represents a physical view on a Tensor. // // One can use it to further convert logical dimension indices, logical shapes, // and more to their physical variants, or convert a new (physical) tensor into // a logical BatchedTensor. (TODO(rzou): some of these are not yet implemented). // // VmapPhysicalView stores a physical tensor with all of its batch dimensions at // the front and some levels that correspond to said batch dimensions. // // The levels bitset specifies which vmap levels correspond to the batch // dimensions at the front of the tensor. In particular, the number of set bits // corresponds to the number of batch dimensions on `tensor` and the rightmost // bit of `levels` specifies the maximum number of nested vmaps we are in at // this point in time. // For example, given: // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5, 6), levels={1, 3}) // // Rightmost bit of `levels` is 3 indicating the number of nested vmaps less // than or equal to 3. // bitset: 010100 // ^ // | // levels: 012345 struct TORCH_API VmapPhysicalView { VmapPhysicalView(Tensor&& tensor, std::bitset<kVmapNumLevels> levels) : levels_(levels), tensor_(tensor) { TORCH_INTERNAL_ASSERT(!isBatchedTensor(tensor)); } Tensor& tensor() { return tensor_; } const Tensor& tensor() const { return tensor_; } // Maps logical dim indices to physical dim indices. Also does dim wrapping. // // For example, given: // physical_view = VmapPhysicalView(tensor=ones(2, 3, 4, 5), levels={1, 3}) // // Then physical_view.getPhysicalDims({0, 1}) returns {2, 3}. // This is because the size of levels tell us that the first two dimensions // of `tensor_` are batch dimensions, so a logical dim of `n` is actually // a physical dim of `n + 2`. VmapDimVector getPhysicalDims(OptionalIntArrayRef logical_dims) const; int64_t getPhysicalDim(int64_t logical_dim) const; // Returns a VmapPhysicalToLogicalMap object. This can be used for // mapping a physical tensor to a new logical tensor (BatchedTensor) VmapPhysicalToLogicalMap getPhysicalToLogicalMap() const; // Maps a logical shape to a physical shape by pre-pending the batch // sizes to the logical shape. VmapDimVector getPhysicalShape(IntArrayRef logical_shape) const; int64_t numBatchDims() const; private: int64_t numLogicalDims() const; std::bitset<kVmapNumLevels> levels_; Tensor tensor_; }; // Convenience struct used for mapping a physical tensor (a non-BatchedTensor) // to a logical one (BatchedTensor). It holds some levels that are used to do // the mapping and assumes that the batch dimensions in the physical tensor all // occur at the front of the tensor. struct TORCH_API VmapPhysicalToLogicalMap { VmapPhysicalToLogicalMap(std::bitset<kVmapNumLevels> levels) : levels_(levels) {} // Maps a physical tensor to a new logical tensor (BatchedTensor). // Assumes that all of the "batch dimensions" are at the front // of the physical tensor. 
For example, given: // - x = rank-4 Tensor with size 2, 3, 5, 7 // - levels = (2, 4) // Returns: // - BatchedTensor(x, bdims=[(dim=0,lvl=2), (dim=1, lvl=4)]) Tensor apply(const Tensor& physical_tensor) const; // Given a vector of physical tensors, // 1. maps each tensor to a new logical tensor. Assumes that all of the // "batch dimensions" are at the front of the physical tensors. // 2. stores the new logical tensors back into the passed-in vector. This is // to avoid additional dynamic allocations. void applyInplace(std::vector<Tensor>& physical_tensors) const; std::bitset<kVmapNumLevels> levels_; }; } // namespace at
7,803
41.413043
80
h
null
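Since the physical layout puts all batch dims at the front, the logical-to-physical dim mapping reduces to an offset; a minimal sketch (get_physical_dim is illustrative, not the real method):

#include <cstdint>
#include <iostream>

int64_t get_physical_dim(int64_t logical_dim, int64_t num_batch_dims) {
  return logical_dim + num_batch_dims; // dim wrapping omitted for brevity
}

int main() {
  // physical tensor ones(2, 3, 4, 5) with two batch dims at the front:
  std::cout << get_physical_dim(0, 2) << "\n"; // 2
  std::cout << get_physical_dim(1, 2) << "\n"; // 3
}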
pytorch-main/aten/src/ATen/LinalgBackend.h
#pragma once #include <c10/util/Exception.h> #include <ostream> #include <string> namespace at { enum class LinalgBackend : int8_t { Default, Cusolver, Magma }; inline std::string LinalgBackendToString(at::LinalgBackend backend) { switch (backend) { case LinalgBackend::Default: return "at::LinalgBackend::Default"; case LinalgBackend::Cusolver: return "at::LinalgBackend::Cusolver"; case LinalgBackend::Magma: return "at::LinalgBackend::Magma"; default: TORCH_CHECK(false, "Unknown linalg backend"); } } inline std::ostream& operator<<( std::ostream& stream, at::LinalgBackend backend) { return stream << LinalgBackendToString(backend); } } // namespace at
719
21.5
69
h
null
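A small usage sketch (assuming libtorch headers are available) of the enum and its stream operator:

#include <ATen/LinalgBackend.h>
#include <iostream>

int main() {
  at::LinalgBackend b = at::LinalgBackend::Cusolver;
  std::cout << b << "\n"; // prints "at::LinalgBackend::Cusolver"
}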
pytorch-main/aten/src/ATen/MapAllocator.h
#pragma once #include <c10/core/Allocator.h> namespace at { enum MappedAllocatorModes { ALLOCATOR_MAPPED_SHARED = 1, ALLOCATOR_MAPPED_SHAREDMEM = 2, ALLOCATOR_MAPPED_EXCLUSIVE = 4, ALLOCATOR_MAPPED_NOCREATE = 8, ALLOCATOR_MAPPED_KEEPFD = 16, ALLOCATOR_MAPPED_FROMFD = 32, ALLOCATOR_MAPPED_UNLINK = 64 }; // Sentinel value/type to help distinguish the file descriptor constructor from // the non-file descriptor constructor enum WithFd { WITH_FD }; TORCH_API std::string NewProcessWideShmHandle(); class TORCH_API MapAllocator { public: MapAllocator(std::string filename, int flags, size_t size); MapAllocator(WithFd, std::string filename, int fd, int flags, size_t size); MapAllocator(const MapAllocator&) = delete; MapAllocator& operator=(const MapAllocator&) = delete; MapAllocator(MapAllocator&&) = delete; MapAllocator& operator=(MapAllocator&&) = delete; const char* filename() const { return filename_.c_str(); } int fd() const { #ifdef _WIN32 TORCH_CHECK(false, "MapAllocator::fd() is unsupported on Windows"); #else return fd_; #endif } ptrdiff_t size() const { return size_; } // Return a pointer to the actual data for this allocator // (in the case of the refcounted allocator, this is offset // from the base pointer.) virtual void* data() const { return base_ptr_; } static MapAllocator* fromDataPtr(const at::DataPtr&); static at::DataPtr makeDataPtr( std::string filename, int flags, size_t size, size_t* actual_size_out); static at::DataPtr makeDataPtr( WithFd, const char* filename, int fd, int flags, size_t size, size_t* actual_size_out); // Closes the data. Helps us avoid destructor shenanigans virtual void close(); // This is very dangerous. You have to redefine this destructor for each // subclass virtual ~MapAllocator(); protected: bool closed_ = false; std::string filename_; int flags_ = 0; ptrdiff_t size_; /* mapped size */ #ifdef _WIN32 void* handle_; void* event_; std::string eventname_; #else int fd_ = -1; #endif void* base_ptr_ = nullptr; }; // Base-from-member idiom struct TORCH_API RefcountedMapAllocatorArgCheck { RefcountedMapAllocatorArgCheck(int flags); }; class TORCH_API RefcountedMapAllocator : private RefcountedMapAllocatorArgCheck, public MapAllocator { public: RefcountedMapAllocator(const char* filename, int flags, size_t size); RefcountedMapAllocator( WithFd, const char* filename, int fd, int flags, size_t size); static RefcountedMapAllocator* fromDataPtr(const at::DataPtr&); static at::DataPtr makeDataPtr( const char* filename, int flags, size_t size, size_t* actual_size_out); static at::DataPtr makeDataPtr( WithFd, const char* filename, int fd, int flags, size_t size, size_t* actual_size_out); void* data() const override; void incref(); int decref(); void close() override; ~RefcountedMapAllocator() override { close(); } protected: void checkFlags(); void initializeAlloc(); }; } // namespace at
3,211
22.970149
80
h
null
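A hedged usage sketch of the allocator API above; /tmp/example_map is an illustrative path and the flags shown assume a POSIX system:

#include <ATen/MapAllocator.h>
#include <iostream>

int main() {
  size_t actual_size = 0;
  // Create and map a 4 KiB shared file; EXCLUSIVE asks for fresh creation.
  at::DataPtr ptr = at::MapAllocator::makeDataPtr(
      "/tmp/example_map",
      at::ALLOCATOR_MAPPED_SHARED | at::ALLOCATOR_MAPPED_EXCLUSIVE,
      4096,
      &actual_size);
  std::cout << actual_size << "\n"; // size actually mapped
}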
pytorch-main/aten/src/ATen/MatrixRef.h
#pragma once #include <ATen/Utils.h> #include <c10/util/ArrayRef.h> #include <vector> namespace at { /// MatrixRef - Like an ArrayRef, but with an extra recorded stride so that /// we can easily view it as a multidimensional array. /// /// Like ArrayRef, this class does not own the underlying data, it is expected /// to be used in situations where the data resides in some other buffer. /// /// This is intended to be trivially copyable, so it should be passed by /// value. /// /// For now, 2D only (so the copies are actually cheap, without having /// to write a SmallVector class) and contiguous only (so we can /// return non-strided ArrayRef on index). /// /// P.S. dimension 0 indexes rows, dimension 1 indexes columns template <typename T> class MatrixRef { public: typedef size_t size_type; private: /// Underlying ArrayRef ArrayRef<T> arr; /// Stride of dim 0 (outer dimension) size_type stride0; // Stride of dim 1 is assumed to be 1 public: /// Construct an empty MatrixRef. /*implicit*/ MatrixRef() : arr(nullptr), stride0(0) {} /// Construct a MatrixRef from an ArrayRef and outer stride. /*implicit*/ MatrixRef(ArrayRef<T> arr, size_type stride0) : arr(arr), stride0(stride0) { TORCH_CHECK( arr.size() % stride0 == 0, "MatrixRef: ArrayRef size ", arr.size(), " not divisible by stride ", stride0) } /// @} /// @name Simple Operations /// @{ /// empty - Check if the matrix is empty. bool empty() const { return arr.empty(); } const T* data() const { return arr.data(); } /// size - Get the size of a dimension size_t size(size_t dim) const { if (dim == 0) { return arr.size() / stride0; } else if (dim == 1) { return stride0; } else { TORCH_CHECK( 0, "MatrixRef: out of bounds dimension ", dim, "; expected 0 or 1"); } } size_t numel() const { return arr.size(); } /// equals - Check for element-wise equality. bool equals(MatrixRef RHS) const { return stride0 == RHS.stride0 && arr.equals(RHS.arr); } /// @} /// @name Operator Overloads /// @{ ArrayRef<T> operator[](size_t Index) const { return arr.slice(Index * stride0, stride0); } /// Disallow accidental assignment from a temporary. /// /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type& operator=(U&& Temporary) = delete; /// Disallow accidental assignment from an initializer list. /// /// The declaration here is extra complicated so that "arrayRef = {}" /// continues to select the move assignment operator. template <typename U> typename std::enable_if<std::is_same<U, T>::value, MatrixRef<T>>::type& operator=(std::initializer_list<U>) = delete; }; } // end namespace at
2,929
25.636364
78
h
null
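A short usage sketch: viewing a flat 6-element buffer as a 2 x 3 matrix (a minimal example; assumes the c10/ATen headers are on the include path):

#include <ATen/MatrixRef.h>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> buf{1, 2, 3, 4, 5, 6};
  at::MatrixRef<float> m(buf, /*stride0=*/3); // 2 rows x 3 columns
  std::cout << m.size(0) << " x " << m.size(1) << "\n"; // 2 x 3
  std::cout << m[1][2] << "\n"; // row 1, col 2 -> 6
}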
pytorch-main/aten/src/ATen/MemoryOverlap.h
#pragma once #include <c10/macros/Export.h> namespace c10 { struct TensorImpl; } namespace at { class TensorBase; // MemOverlap: Whether or not there is memory overlap // // No: Absolutely no memory overlap // Yes: Absolutely yes memory overlap // TooHard: There might be memory overlap, but it was too expensive to compute. // // NB: Please update the python test for these if you renumber them. enum class MemOverlap { No, Yes, TooHard }; enum class MemOverlapStatus { Full, Partial, No, TooHard }; TORCH_API MemOverlap has_internal_overlap(const TensorBase& t); TORCH_API MemOverlap has_internal_overlap(c10::TensorImpl* t); TORCH_API void assert_no_internal_overlap(const TensorBase& t); TORCH_API void assert_no_internal_overlap(c10::TensorImpl* t); TORCH_API MemOverlapStatus get_overlap_status(const TensorBase& a, const TensorBase& b); TORCH_API MemOverlapStatus get_overlap_status(const c10::TensorImpl* a, const c10::TensorImpl* b); TORCH_API void assert_no_partial_overlap( const TensorBase& a, const TensorBase& b); void assert_no_partial_overlap(c10::TensorImpl* a, c10::TensorImpl* b); TORCH_API void assert_no_overlap(const TensorBase& a, const TensorBase& b); TORCH_API void assert_no_overlap(c10::TensorImpl* a, c10::TensorImpl* b); } // namespace at
1,287
28.953488
79
h
null
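A minimal usage sketch (assuming libtorch): an expand()ed tensor carries stride-0 dimensions, so the internal-overlap check is expected to report Yes.

#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>
#include <iostream>

int main() {
  at::Tensor t = at::ones({3, 1}).expand({3, 4}); // stride 0 along dim 1
  at::MemOverlap r = at::has_internal_overlap(t);
  std::cout << (r == at::MemOverlap::Yes) << "\n"; // 1
  // assert_no_internal_overlap(t) would throw for this tensor.
}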
pytorch-main/aten/src/ATen/NamedTensorUtils.h
#pragma once #include <ATen/NamedTensor.h> #include <ATen/TensorNames.h> #include <ATen/WrapDimUtilsMulti.h> #include <ATen/core/DimVector.h> #include <ATen/core/Tensor.h> #include <functional> namespace at { using NameVector = SmallVector<Dimname, kDimVectorStaticSize>; inline bool has_names(ITensorListRef tensors) { return std::any_of(tensors.begin(), tensors.end(), [](const Tensor& t) { return t.has_names(); }); } // Converts dim to a positional index. Errors if `dim` cannot be used to // refer to any dimension of tensor. TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim); TORCH_API std::vector<int64_t> dimnames_to_positions( const Tensor& tensor, DimnameList dims); // Unifies two DimnameLists to produce a third. This is useful for implementing // the named inference rule for binary broadcasting operations like add. // // There are three main constraints: // 1) Check matching: Names must match positionally from the right. // 2) Check misaligned: If a name `n` is in `names`, then it must appear at // the same index from the right in `other`. // 3) The output names are obtained by unifying the names individually from the // right. TORCH_API std::vector<Dimname> unify_from_right( DimnameList names, DimnameList other, const char* action = "broadcast"); [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) { TORCH_CHECK( false, op_name, ": You passed a dimname (string) to this op in place of a dimension " "index but it does not yet support this behavior. Please pass a dimension " "index to work around this."); } // [NOTE] Writing name inference rules // // Operators that support named tensors are either composed of operations that // support named tensors or implement some name inference rule. An op that // implements its own name inference rule generally looks like the following: // // Tensor op(...) { // perform_shape_checks(...); // # (1) // auto maybe_outnames = compute_outnames(...); // auto result = [&]() { // NoNamesGuard guard; // return op_impl(...); // }(); // # (2) // propagate_names_if_nonempty(result, maybe_outnames); // // Each op has (1) a compute outnames step and (2) a propagate names step. // // compute_outnames is responsible for checking that input names match and // determining what the output names should be. It returns either: // - {} (if the input tensors are all unnamed) // - non-empty outnames. // // propagate_names_if_nonempty propagates the outnames if they exist to the // result tensors. // // The {} case is an optimization; if the user does not use named tensors they // pay no perf cost for it. namespace namedinference { const Tensor& propagate_names_if_present_and_nonempty( const Tensor& result, c10::optional<DimnameList> maybe_names, bool validate_names = false); // Propagates `names` to `result` if `names` is not empty. // `names` can be empty; see [NOTE] Writing name inference rules // If `names` is not empty, `names.size()` should equal `result.dim()`. // When in doubt, use this overload instead of the others. TORCH_API const Tensor& propagate_names_if_nonempty( const Tensor& result, DimnameList maybe_names, bool validate_names = false); // Propagates `names` to `result`. Only use this if we are certain that there // are names to propagate (that names is not empty). TORCH_API const Tensor& propagate_names( const Tensor& result, DimnameList names, bool validate_names = false); // Propagates all names from src to result. 
TORCH_API void propagate_names(const Tensor& result, const Tensor& src); // Propagates all names except for those at the excluded_idxs. TORCH_API void propagate_names_except( const Tensor& result, const Tensor& src, IntArrayRef excluded_idxs); // Used for reduction ops that have a `keepdim` arg. TORCH_API void propagate_names_for_reduction( const Tensor& result, const Tensor& src, IntArrayRef excluded_idxs, bool keepdim); TORCH_API void propagate_names_for_expand( const Tensor& result, const Tensor& self); TORCH_API std::vector<Dimname> compute_cat_outnames( const MaterializedITensorListRef& tensors); TORCH_API std::vector<Dimname> compute_broadcast_outnames( const Tensor& self, const Tensor& other); TORCH_API std::vector<Dimname> broadcast_to_outnames( const Tensor& tensor, const Tensor& reference_tensor, const char* op_name); TORCH_API std::vector<Dimname> compute_matmul_outnames( const Tensor& self, const Tensor& other); TORCH_API std::vector<Dimname> compute_cdist_outnames( const Tensor& self, const Tensor& other); TORCH_API std::vector<Dimname> compute_bmm_outnames( const Tensor& result, const Tensor& self, const Tensor& other); TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor); TORCH_API std::vector<Dimname> compute_squeeze_outnames( const Tensor& tensor, std::bitset<dim_bitset_size> dims); std::vector<Dimname> compute_diagonal_outnames( const Tensor& tensor, int64_t dim1, int64_t dim2); // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly. TORCH_API TensorImpl* propagate_names_if_nonempty( TensorImpl* result, DimnameList maybe_names, bool validate_names = false); TORCH_API TensorImpl* propagate_names( TensorImpl* result, DimnameList names, bool validate_names = false); TORCH_API void propagate_names(TensorImpl* result, /*const */ TensorImpl* src); TORCH_API inline void propagate_names( const TensorBase& result, DimnameList names, bool validate_names = false) { propagate_names(result.unsafeGetTensorImpl(), names, validate_names); } TORCH_API inline void propagate_names_if_nonempty( const TensorBase& result, DimnameList names, bool validate_names = false) { propagate_names_if_nonempty( result.unsafeGetTensorImpl(), names, validate_names); } TORCH_API inline void propagate_names( const TensorBase& result, const TensorBase& src) { propagate_names(result.unsafeGetTensorImpl(), src.unsafeGetTensorImpl()); } // result = m1 @ m2 + bias TORCH_API std::vector<Dimname> propagate_names_for_addmm( const Tensor& m1, const Tensor& m2, const Tensor& bias); TORCH_API std::vector<Dimname> propagate_names_for_addmv( const Tensor& mat, const Tensor& vec, const Tensor& bias); TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2); TORCH_API std::vector<Dimname> compute_baddbmm_outnames( const Tensor& result, const Tensor& self, const Tensor& other, const Tensor& bias); TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other); } // namespace namedinference } // namespace at
6,809
30.527778
81
h
null
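A hedged toy of the unify_from_right rule, with plain strings standing in for Dimnames and "*" as the wildcard (a sketch of the rule, not the real helper):

#include <algorithm>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<std::string> unify_from_right(const std::vector<std::string>& a,
                                          const std::vector<std::string>& b) {
  std::vector<std::string> out(std::max(a.size(), b.size()), "*");
  for (size_t i = 1; i <= out.size(); ++i) {  // walk from the right
    std::string x = i <= a.size() ? a[a.size() - i] : "*";
    std::string y = i <= b.size() ? b[b.size() - i] : "*";
    if (x != y && x != "*" && y != "*")
      throw std::runtime_error("names do not match from the right");
    out[out.size() - i] = (x == "*") ? y : x; // unify positionally
  }
  return out;
}

int main() {
  auto r = unify_from_right({"N", "C"}, {"C"});
  std::cout << r[0] << "," << r[1] << "\n"; // N,C
}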
pytorch-main/aten/src/ATen/NestedTensorImpl.h
#pragma once #include <ATen/MemoryOverlap.h> #include <ATen/Tensor.h> #include <c10/core/DispatchKey.h> #include <c10/core/DispatchKeySet.h> #include <c10/core/MemoryFormat.h> #include <c10/core/TensorImpl.h> #include <c10/util/ArrayRef.h> #include <c10/util/Exception.h> #include <c10/util/Metaprogramming.h> #include <c10/util/irange.h> namespace at { namespace native { struct NestedTensorImpl; inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt); int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor); struct TORCH_API NestedTensorImpl : public c10::TensorImpl { explicit NestedTensorImpl( Storage storage, c10::DispatchKeySet key_set, const caffe2::TypeMeta data_type, at::Tensor nested_sizes, at::Tensor nested_strides, at::Tensor storage_offsets); explicit NestedTensorImpl( at::Tensor buffer, at::Tensor nested_sizes, at::Tensor nested_strides, at::Tensor storage_offsets); // assume contiguous, `nested_strides` and `offsets` // can be inferred from `nested_sizes` explicit NestedTensorImpl(at::Tensor buffer, at::Tensor nested_sizes); // This constructor is used for creating view tensors from nested tensors explicit NestedTensorImpl( c10::TensorImpl::ImplType impl_type, const at::Tensor& base_tensor, at::Tensor nested_sizes, at::Tensor nested_strides, at::Tensor storage_offsets); // TODO: don't expose private implementation details like this; in // particular, resizing this tensor will mess up our dim() and // callers cannot fix it. const Tensor& get_nested_sizes() const { return nested_sizes_; } // TODO: don't expose private implementation details like this const Tensor& get_nested_strides() const { return nested_strides_; } const Tensor& get_storage_offsets() const { return storage_offsets_; } // Returns nullopt if the ith dimension is irregular. The ith dimension // of a NestedTensor is regular if the unbound tensors match in // size at the (i-1)th dimension. c10::optional<int64_t> opt_size(int64_t d) const; int64_t size(int64_t d) const { c10::optional<int64_t> optional_size = this->opt_size(d); TORCH_CHECK( optional_size.has_value(), "Given dimension ", d, " is irregular and does not have a size."); return *optional_size; } /** * Return a view of the nested tensor as a 1 dimensional contiguous tensor. * * The buffer tensor created by this function shares the same storage_impl as * the original nested tensor, and therefore can be seen as a view. * * @return A newly constructed view tensor */ at::Tensor get_buffer() const { TORCH_CHECK( nested_tensor_impl_is_contiguous(this), "NestedTensor must be contiguous to get buffer."); return get_unsafe_storage_as_tensor(); } /** * If possible use get_buffer() instead. This function returns the storage * as a tensor directly, which is not safe to use in general. If using this * function, the caller must ensure to account for nested_sizes, * nested_strides and storage_offsets. 
* * @return A newly constructed view tensor */ at::Tensor get_unsafe_storage_as_tensor() const { auto buffer_key_set_ = generate_buffer_key_set(); const auto buffer_size = get_buffer_size(); auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>( c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_); buffer_tensor_impl->set_sizes_contiguous(c10::makeArrayRef(buffer_size)); return Tensor(buffer_tensor_impl); } int64_t get_buffer_size() const { return storage_.nbytes() / data_type_.itemsize(); } protected: const char* tensorimpl_type_name() const override; // TODO: numel_custom and is_contiguous_custom can be profitably overridden // with real implementations int64_t numel_custom() const override; c10::SymInt sym_numel_custom() const override; bool is_contiguous_custom(MemoryFormat) const override; int64_t size_custom(int64_t d) const override { return this->size(d); } c10::SymInt sym_size_custom(int64_t d) const override { return c10::SymInt{this->size(d)}; } IntArrayRef sizes_custom() const override; c10::SymIntArrayRef sym_sizes_custom() const override; IntArrayRef strides_custom() const override; c10::SymIntArrayRef sym_strides_custom() const override; // this one is real int64_t dim_custom() const override; c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) const override; c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( c10::VariableVersion&& version_counter, bool allow_tensor_metadata_change) const override; void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override { copy_tensor_metadata( /*src_impl=*/impl.get(), /*dest_impl=*/this, /*version_counter=*/version_counter(), /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); } private: // Must be called after any changes to our dim() to sync the state // to TensorImpl. void refresh_dim(); const at::Tensor nested_sizes_, nested_strides_; // The starting positions of the underlying tensors in contiguous buffer // i.e. the buffer memory offsets to get the underlying tensors // The reason to keep this metadata is that, without strong enough constraint // it cannot be derived from `nested_sizes_` // and `nested_strides_`: // 1. when buffer has blanks, e.g. [tensor1, blank, tensor2] // this can happen e.g. after slicing a nested tensor // 2. when multiple tensors share a same memory // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2] // Some strong enough constraints are: // 1. every underlying tensor is contiguous in memory // && nesting in ascending order const at::Tensor storage_offsets_; // NOTE: -1 here means the size is missing // Optional to allow it to be computed lazily from nested. // TODO: maybe we can remove this metadata since // we can compute it from `nested_sizes_` mutable c10::optional<std::vector<int64_t>> opt_sizes_; template <typename VariableVersion> c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core( VariableVersion&& version_counter, bool allow_tensor_metadata_change) const; /** * Generates a non-nested key_set from a nested tensor. 
* * For many nested tensor kernel implementations a buffer tensor * is generated and redispatched to a non-nested kernel this function * generates the key set used by that buffer tensor * * @return Appropriate key set for non-nested tensor */ inline c10::DispatchKeySet generate_buffer_key_set() const { auto buffer_key_set = this->key_set(); const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset); // Remove nested tensor specific keys buffer_key_set = buffer_key_set - c10::DispatchKeySet{ c10::DispatchKey::NestedTensor, c10::DispatchKey::AutogradNestedTensor}; // Add dense tensor specific keys buffer_key_set = buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense}; buffer_key_set = Autograd ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set : buffer_key_set; return buffer_key_set; } }; inline NestedTensorImpl* get_nested_tensor_impl_or_null( const at::Tensor& tensor) { if (tensor.is_nested()) { return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl()); } return nullptr; } inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) { TORCH_CHECK( tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor."); return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl()); } inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) { int64_t ntensors = nt->size(0); if (ntensors == 0) { return true; } const Tensor &sizemat = nt->get_nested_sizes(), &stridemat = nt->get_nested_strides(); int64_t* offsets_ptr = nt->get_storage_offsets().data_ptr<int64_t>(); int64_t orig_dim = sizemat.size(1); // nesting scalars if (orig_dim == 0) { // each scalar must be contiguous // if there is blank memory between underlying scalars for (int64_t i = 0; i < ntensors; i++) { if (offsets_ptr[i] != i) { return false; } } } // nesting tensors else { // if any underlying tensor is non-contiguous const int64_t *sizemat_ptr = sizemat.data_ptr<int64_t>(), *stridemat_ptr = stridemat.data_ptr<int64_t>(); for (int64_t i = 0; i < ntensors; i++) { if (stridemat_ptr[orig_dim - 1] != 1) { return false; } int64_t product = sizemat_ptr[orig_dim - 1]; for (int64_t j = orig_dim - 2; j >= 0; j--) { if (stridemat_ptr[j] != product) { return false; } product *= sizemat_ptr[j]; } sizemat_ptr += orig_dim; stridemat_ptr += orig_dim; } // if there is blank memory between underlying tensors if (offsets_ptr[0] != 0) { return false; } sizemat_ptr = sizemat.data_ptr<int64_t>(); stridemat_ptr = stridemat.data_ptr<int64_t>(); for (int64_t i = 1; i < ntensors; i++) { if (offsets_ptr[i] != offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) { return false; } sizemat_ptr += orig_dim; stridemat_ptr += orig_dim; } } // everything is fine return true; } inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) { return get_nested_tensor_impl(tensor)->get_nested_sizes(); } } // namespace native } // namespace at
9,870
34.128114
80
h
null
pytorch-main/aten/src/ATen/NumericUtils.h
#pragma once #ifdef __HIPCC__ #include <hip/hip_runtime.h> #endif #include <c10/macros/Macros.h> #include <c10/util/BFloat16.h> #include <c10/util/Half.h> #include <c10/util/complex.h> #include <cmath> #include <type_traits> namespace at { // std::isnan isn't performant to use on integral types; it will // (uselessly) convert to floating point and then do the test. // This function is. template < typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isnan(T /*val*/) { return false; } template < typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isnan(T val) { #if defined(__CUDACC__) || defined(__HIPCC__) return ::isnan(val); #else return std::isnan(val); #endif } template < typename T, typename std::enable_if<c10::is_complex<T>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isnan(T val) { return std::isnan(val.real()) || std::isnan(val.imag()); } template < typename T, typename std::enable_if<std::is_same<T, at::Half>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isnan(T val) { return at::_isnan(static_cast<float>(val)); } template < typename T, typename std::enable_if<std::is_same<T, at::BFloat16>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) { return at::_isnan(static_cast<float>(val)); } inline C10_HOST_DEVICE bool _isnan(at::BFloat16 val) { return at::_isnan(static_cast<float>(val)); } // std::isinf isn't performant to use on integral types; it will // (uselessly) convert to floating point and then do the test. // This function is. template < typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isinf(T /*val*/) { return false; } template < typename T, typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0> inline C10_HOST_DEVICE bool _isinf(T val) { #if defined(__CUDACC__) || defined(__HIPCC__) return ::isinf(val); #else return std::isinf(val); #endif } inline C10_HOST_DEVICE bool _isinf(at::Half val) { return at::_isinf(static_cast<float>(val)); } inline C10_HOST_DEVICE bool _isinf(at::BFloat16 val) { return at::_isinf(static_cast<float>(val)); } template <typename T> C10_HOST_DEVICE inline T exp(T x) { static_assert( !std::is_same<T, double>::value, "this template must be used with float or less precise type"); #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) // use __expf fast approximation for peak bandwidth return __expf(x); #else return ::exp(x); #endif } template <> C10_HOST_DEVICE inline double exp<double>(double x) { return ::exp(x); } template <typename T> C10_HOST_DEVICE inline T log(T x) { static_assert( !std::is_same<T, double>::value, "this template must be used with float or less precise type"); #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) // use __logf fast approximation for peak bandwidth return __logf(x); #else return ::log(x); #endif } template <> C10_HOST_DEVICE inline double log<double>(double x) { return ::log(x); } template <typename T> C10_HOST_DEVICE inline T log1p(T x) { static_assert( !std::is_same<T, double>::value, "this template must be used with float or less precise type"); #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) // use __logf fast approximation for peak bandwidth // NOTE: There is no __log1pf so unfortunately we lose precision. 
return __logf(1.0f + x); #else return ::log1p(x); #endif } template <> C10_HOST_DEVICE inline double log1p<double>(double x) { return ::log1p(x); } template <typename T> C10_HOST_DEVICE inline T tan(T x) { static_assert( !std::is_same<T, double>::value, "this template must be used with float or less precise type"); #if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) // use __tanf fast approximation for peak bandwidth return __tanf(x); #else return ::tan(x); #endif } template <> C10_HOST_DEVICE inline double tan<double>(double x) { return ::tan(x); } } // namespace at
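The integral/floating split above is plain SFINAE overloading, and the pattern stands alone without ATen. A minimal self-contained illustration (the names are mine, not ATen's):

#include <cassert>
#include <cmath>
#include <type_traits>

// Same dispatch idea as at::_isnan: integral types can never be NaN, so the
// integral overload is a compile-time constant `false` with no conversion.
template <
    typename T,
    typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
bool is_nan(T) {
  return false;
}

template <
    typename T,
    typename std::enable_if<std::is_floating_point<T>::value, int>::type = 0>
bool is_nan(T v) {
  return std::isnan(v);
}

int main() {
  assert(!is_nan(42));          // integral overload: no float round-trip
  assert(is_nan(std::nan("")));  // floating-point overload
  assert(!is_nan(1.0f));
}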
4,157
23.60355
78
h
null
pytorch-main/aten/src/ATen/OpMathType.h
#pragma once #include <c10/core/ScalarType.h> #include <c10/util/BFloat16.h> #include <c10/util/Exception.h> #include <c10/util/Half.h> namespace at { // For FP16 or BFloat16 inputs, ops should perform internal math in FP32. template <typename scalar_t> struct OpMathType { using type = scalar_t; }; template <> struct OpMathType<at::Half> { using type = float; }; template <> struct OpMathType<at::BFloat16> { using type = float; }; template <> struct OpMathType<c10::complex<Half>> { using type = c10::complex<float>; }; template <typename T> using opmath_type = typename OpMathType<T>::type; namespace { inline c10::ScalarType toOpMathType(const c10::ScalarType type) { switch (type) { #define DEFINE_CASE(scalar_t, TypeNum) \ case ScalarType::TypeNum: \ return CppTypeToScalarType<at::opmath_type<scalar_t>>::value; AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(DEFINE_CASE) #undef DEFINE_CASE default: TORCH_INTERNAL_ASSERT(false, "Unrecognized ScalarType: ", type); } } } // namespace } // namespace at
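Since `opmath_type` is a pure compile-time map, its behavior can be pinned down with static_asserts; a usage sketch assuming a libtorch include path, with the expected mappings read directly off the specializations above:

#include <ATen/OpMathType.h>
#include <type_traits>

// Reduced-precision inputs do their internal math in float; all other types
// map to themselves via the primary template.
static_assert(
    std::is_same<at::opmath_type<at::Half>, float>::value, "Half -> float");
static_assert(
    std::is_same<at::opmath_type<at::BFloat16>, float>::value,
    "BFloat16 -> float");
static_assert(
    std::is_same<at::opmath_type<double>, double>::value,
    "already full precision");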
1,052
20.06
73
h
null
pytorch-main/aten/src/ATen/OpaqueTensorImpl.h
#pragma once #include <c10/core/MemoryFormat.h> #include <c10/core/SymIntArrayRef.h> #include <c10/core/TensorImpl.h> #include <c10/util/Exception.h> namespace at { // An "Opaque" TensorImpl -- there are no strides and (for now) // even data() is not supported (thus no pointer arithmetic). // NOTE: We could allow data() in the future, but would have to ensure pointer // arithmetic code is properly guarded. // // NOTE: This does not support resize_ (and other metadata-changing ops) because // of `shallow_copy_and_detach`. We would need to define an interface to // "shallow copy" in order to add support. template <typename OpaqueHandle> struct TORCH_API OpaqueTensorImpl : public TensorImpl { // public constructor for now... OpaqueTensorImpl( at::DispatchKeySet key_set, const caffe2::TypeMeta data_type, c10::Device device, OpaqueHandle opaque_handle, c10::IntArrayRef sizes, bool is_non_overlapping_and_dense = true) : TensorImpl(key_set, data_type, device), opaque_handle_(std::move(opaque_handle)) { set_storage_access_should_throw(); set_custom_sizes_strides(SizesStridesPolicy::CustomStrides); sizes_and_strides_.set_sizes(sizes); refresh_numel(); is_non_overlapping_and_dense_ = is_non_overlapping_and_dense; } // Destructor doesn't call release_resources because it's // unnecessary; don't forget to change that if needed! void release_resources() override { TensorImpl::release_resources(); opaque_handle_ = {}; } void set_size(int64_t dim, int64_t new_size) override { AT_ERROR("opaque tensors do not have set_size"); } void set_stride(int64_t dim, int64_t new_stride) override { AT_ERROR("opaque tensors do not have set_stride"); } void set_storage_offset(int64_t storage_offset) override { AT_ERROR("opaque tensors do not have set_storage_offset"); } #ifdef DEBUG bool has_storage() const override { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( !storage_, "OpaqueTensorImpl assumes that storage_ is never set"); return false; } #endif /** * Return a TensorImpl that is a shallow-copy of this TensorImpl. * * For usage of `version_counter` and `allow_tensor_metadata_change`, * see NOTE [ TensorImpl Shallow-Copying ]. */ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) const override { auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>( key_set(), dtype(), device(), opaque_handle_, sizes_and_strides_.sizes_arrayref()); copy_tensor_metadata( /*src_opaque_impl=*/this, /*dest_opaque_impl=*/impl.get(), /*version_counter=*/version_counter, /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); impl->refresh_numel(); return impl; } /** * Return a TensorImpl that is a shallow-copy of this TensorImpl. * * For usage of `version_counter` and `allow_tensor_metadata_change`, * see NOTE [ TensorImpl Shallow-Copying ]. */ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( c10::VariableVersion&& version_counter, bool allow_tensor_metadata_change) const override { auto impl = c10::make_intrusive<OpaqueTensorImpl<OpaqueHandle>>( key_set(), dtype(), device(), opaque_handle_, sizes_and_strides_.sizes_arrayref()); copy_tensor_metadata( /*src_opaque_impl=*/this, /*dest_opaque_impl=*/impl.get(), /*version_counter=*/std::move(version_counter), /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); impl->refresh_numel(); return impl; } /** * Shallow-copies data from another TensorImpl into this TensorImpl. 
* * For why this function doesn't check this TensorImpl's * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ]. */ void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override { AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set())); auto opaque_impl = static_cast<const OpaqueTensorImpl<OpaqueHandle>*>(impl.get()); copy_tensor_metadata( /*src_impl=*/opaque_impl, /*dest_impl=*/this, /*version_counter=*/version_counter(), /*allow_tensor_metadata_change=*/allow_tensor_metadata_change()); refresh_numel(); } const OpaqueHandle& opaque_handle() const { return opaque_handle_; } OpaqueHandle& unsafe_opaque_handle() { return opaque_handle_; } protected: /** * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / * storage_offset) from one TensorImpl to another TensorImpl. * * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE * [ TensorImpl Shallow-Copying ]. */ static void copy_tensor_metadata( const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl, OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl, const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) { TensorImpl::copy_tensor_metadata( src_opaque_impl, dest_opaque_impl, version_counter, allow_tensor_metadata_change); // OpaqueTensorImpl-specific fields. dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; } static void copy_tensor_metadata( const OpaqueTensorImpl<OpaqueHandle>* src_opaque_impl, OpaqueTensorImpl<OpaqueHandle>* dest_opaque_impl, c10::VariableVersion&& version_counter, bool allow_tensor_metadata_change) { TensorImpl::copy_tensor_metadata( src_opaque_impl, dest_opaque_impl, std::move(version_counter), allow_tensor_metadata_change); // OpaqueTensorImpl-specific fields. dest_opaque_impl->opaque_handle_ = src_opaque_impl->opaque_handle_; } private: const char* tensorimpl_type_name() const override { return "OpaqueTensorImpl"; } OpaqueHandle opaque_handle_; }; } // namespace at
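As a usage sketch, a hypothetical out-of-tree backend could instantiate the template as below; the handle type, the `PrivateUse1` dispatch key, and the device are illustrative assumptions, while the constructor signature is the one declared above:

#include <ATen/OpaqueTensorImpl.h>

// Hypothetical vendor handle; OpaqueTensorImpl stores it by value, and the
// resulting tensor carries no strides or data pointer, only handle + sizes.
struct MyVendorHandle {
  int buffer_id = -1;
};
using MyVendorTensorImpl = at::OpaqueTensorImpl<MyVendorHandle>;

c10::intrusive_ptr<MyVendorTensorImpl> make_vendor_tensor_impl() {
  return c10::make_intrusive<MyVendorTensorImpl>(
      c10::DispatchKeySet(c10::DispatchKey::PrivateUse1), // assumed backend key
      caffe2::TypeMeta::Make<float>(),
      c10::Device(c10::DeviceType::PrivateUse1, 0),
      MyVendorHandle{/*buffer_id=*/42},
      c10::IntArrayRef({2, 3}));
}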
6,096
31.604278
80
h
null
pytorch-main/aten/src/ATen/Parallel-inl.h
#pragma once #include <c10/util/Exception.h> #include <c10/util/SmallVector.h> namespace at { template <class F> inline void parallel_for( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY(grain_size >= 0); if (begin >= end) { return; } #ifdef INTRA_OP_PARALLEL at::internal::lazy_init_num_threads(); const auto numiter = end - begin; const bool use_parallel = (numiter > grain_size && numiter > 1 && !at::in_parallel_region() && at::get_num_threads() > 1); if (!use_parallel) { internal::ThreadIdGuard tid_guard(0); f(begin, end); return; } internal::invoke_parallel(begin, end, grain_size, f); #else internal::ThreadIdGuard tid_guard(0); f(begin, end); #endif } template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F& f, const SF& sf) { TORCH_CHECK(grain_size >= 0); if (begin >= end) { return ident; } #ifdef INTRA_OP_PARALLEL at::internal::lazy_init_num_threads(); const auto max_threads = at::get_num_threads(); const bool use_parallel = ((end - begin) > grain_size && !at::in_parallel_region() && max_threads > 1); if (!use_parallel) { internal::ThreadIdGuard tid_guard(0); return f(begin, end, ident); } c10::SmallVector<scalar_t, 64> results(max_threads, ident); internal::invoke_parallel( begin, end, grain_size, [&](const int64_t my_begin, const int64_t my_end) { const auto tid = at::get_thread_num(); results[tid] = f(my_begin, my_end, ident); }); scalar_t result = ident; for (auto partial_result : results) { result = sf(result, partial_result); } return result; #else internal::ThreadIdGuard tid_guard(0); return f(begin, end, ident); #endif } } // namespace at
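A usage sketch for `parallel_for` (assuming a libtorch build): the lambda receives a half-open `[begin, end)` chunk and, per the warning documented in Parallel.h below, should touch only raw data pointers, not Tensors:

#include <ATen/Parallel.h>
#include <vector>

// Scale a raw buffer in parallel. Chunks are at least `grain_size` long, so
// a small `v` runs serially in the calling thread with no task overhead.
void scale(std::vector<float>& v, float alpha) {
  at::parallel_for(
      0, static_cast<int64_t>(v.size()), /*grain_size=*/2048,
      [&](int64_t begin, int64_t end) {
        float* p = v.data();
        for (int64_t i = begin; i < end; ++i) {
          p[i] *= alpha;
        }
      });
}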
1,967
22.428571
74
h
null
pytorch-main/aten/src/ATen/Parallel.h
#pragma once

#include <ATen/Config.h>
#include <c10/macros/Macros.h>
#include <functional>
#include <string>

namespace at {

inline int64_t divup(int64_t x, int64_t y) {
  return (x + y - 1) / y;
}

// Called during new thread initialization
TORCH_API void init_num_threads();

// Sets the number of threads to be used in parallel region
TORCH_API void set_num_threads(int);

// Returns the maximum number of threads that may be used in a parallel region
TORCH_API int get_num_threads();

// Returns the current thread number (starting from 0)
// in the current parallel region, or 0 in the sequential region
TORCH_API int get_thread_num();

// Checks whether the code runs in parallel region
TORCH_API bool in_parallel_region();

namespace internal {

// Initialise num_threads lazily at first parallel call
inline void lazy_init_num_threads() {
  thread_local bool init = false;
  if (C10_UNLIKELY(!init)) {
    at::init_num_threads();
    init = true;
  }
}

TORCH_API void set_thread_num(int);

class TORCH_API ThreadIdGuard {
 public:
  ThreadIdGuard(int new_id) : old_id_(at::get_thread_num()) {
    set_thread_num(new_id);
  }

  ~ThreadIdGuard() {
    set_thread_num(old_id_);
  }

 private:
  int old_id_;
};

} // namespace internal

/*
parallel_for

begin: index at which to start applying user function

end: index at which to stop applying user function

grain_size: number of elements per chunk. impacts the degree of
parallelization

f: user function applied in parallel to the chunks, signature:
  void f(int64_t begin, int64_t end)

Warning: parallel_for does NOT copy thread local
states from the current thread to the worker threads.
This means for example that Tensor operations CANNOT be used in the
body of your function, only data pointers.
*/
template <class F>
inline void parallel_for(
    const int64_t begin,
    const int64_t end,
    const int64_t grain_size,
    const F& f);

/*
parallel_reduce

begin: index at which to start applying reduction

end: index at which to stop applying reduction

grain_size: number of elements per chunk. impacts number of elements in
intermediate results tensor and degree of parallelization.

ident: identity for binary combination function sf. sf(ident, x) needs to
return x.

f: function for reduction over a chunk. f needs to be of signature scalar_t
f(int64_t partial_begin, int64_t partial_end, scalar_t identity)

sf: function to combine two partial results. sf needs to be of signature
scalar_t sf(scalar_t x, scalar_t y)

For example, you might have a tensor of 10000 entries and want to sum together
all the elements. parallel_reduce with a grain_size of 2500 will then allocate
an intermediate result tensor with 4 elements. Then it will execute the
function "f" you provide and pass the beginning and end index of these chunks,
so 0-2499, 2500-4999, etc. and the combination identity. It will then write out
the result from each of these chunks into the intermediate result tensor. After
that it'll reduce the partial results from each chunk into a single number using
the combination function sf and the identity ident. For a total summation this
would be "+" and 0 respectively. This is similar to tbb's approach [1], where
you need to provide a function to accumulate a subrange, a function to combine
two partial results and an identity.

Warning: parallel_reduce does NOT copy thread local
states from the current thread to the worker threads.
This means for example that Tensor operations CANNOT be used in the
body of your function, only data pointers.
[1] https://software.intel.com/en-us/node/506154 */ template <class scalar_t, class F, class SF> inline scalar_t parallel_reduce( const int64_t begin, const int64_t end, const int64_t grain_size, const scalar_t ident, const F& f, const SF& sf); // Returns a detailed string describing parallelization settings TORCH_API std::string get_parallel_info(); // Sets number of threads used for inter-op parallelism TORCH_API void set_num_interop_threads(int); // Returns the number of threads used for inter-op parallelism TORCH_API int get_num_interop_threads(); // Launches inter-op parallel task TORCH_API void launch(std::function<void()> func); namespace internal { void launch_no_thread_state(std::function<void()> fn); } // namespace internal // Launches intra-op parallel task TORCH_API void intraop_launch(std::function<void()> func); // Returns number of intra-op threads used by default TORCH_API int intraop_default_num_threads(); } // namespace at #if AT_PARALLEL_OPENMP #include <ATen/ParallelOpenMP.h> // IWYU pragma: keep #elif AT_PARALLEL_NATIVE #include <ATen/ParallelNative.h> // IWYU pragma: keep #elif AT_PARALLEL_NATIVE_TBB #include <ATen/ParallelNativeTBB.h> // IWYU pragma: keep #endif #include <ATen/Parallel-inl.h> // IWYU pragma: keep
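The worked description above translates directly into code; a sketch assuming a libtorch build, where 10000 elements with a grain_size of 2500 yield four partial sums that `sf` then folds together:

#include <ATen/Parallel.h>
#include <numeric>
#include <vector>

int64_t sum_all(const std::vector<int64_t>& v) {
  return at::parallel_reduce(
      0, static_cast<int64_t>(v.size()), /*grain_size=*/2500,
      /*ident=*/int64_t{0},
      // f: reduce one chunk, e.g. [0, 2500), [2500, 5000), ...
      [&](int64_t begin, int64_t end, int64_t ident) {
        return std::accumulate(v.begin() + begin, v.begin() + end, ident);
      },
      // sf: combine two partial results; with ident 0 this is total summation
      [](int64_t a, int64_t b) { return a + b; });
}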
4,829
29
80
h
null
pytorch-main/aten/src/ATen/ParallelNativeTBB.h
#pragma once #include <atomic> #include <cstddef> #include <exception> #include <c10/util/Exception.h> #ifdef _WIN32 #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #endif #include <tbb/tbb.h> #define INTRA_OP_PARALLEL namespace at { namespace internal { template <typename F> inline void invoke_parallel( const int64_t begin, const int64_t end, const int64_t grain_size, const F& f) { // Choose number of tasks based on grain size and number of threads. int64_t chunk_size = divup((end - begin), get_num_threads()); // Make sure each task is at least grain_size size. chunk_size = std::max(grain_size, chunk_size); std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; tbb::parallel_for( tbb::blocked_range<int64_t>(begin, end, chunk_size), [&eptr, &err_flag, f](const tbb::blocked_range<int64_t>& r) { try { internal::ThreadIdGuard tid_guard( tbb::this_task_arena::current_thread_index()); f(r.begin(), r.end()); } catch (...) { if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } }, tbb::static_partitioner{}); if (eptr) { std::rethrow_exception(eptr); } } } // namespace internal } // namespace at
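To make the chunking arithmetic above concrete (standalone code mirroring at::divup; the thread count is an assumed example value):

#include <algorithm>
#include <cassert>
#include <cstdint>

// chunk_size = max(grain_size, divup(end - begin, num_threads))
int64_t divup(int64_t x, int64_t y) {
  return (x + y - 1) / y;
}

int main() {
  const int64_t begin = 0, end = 10000;
  const int64_t grain_size = 2500, num_threads = 8; // thread count assumed
  int64_t chunk_size = std::max(grain_size, divup(end - begin, num_threads));
  assert(chunk_size == 2500); // grain wins over divup(10000, 8) == 1250
  assert(divup(end - begin, chunk_size) == 4); // -> four blocked_range tasks
}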
1,311
22.854545
70
h
null
pytorch-main/aten/src/ATen/ParallelOpenMP.h
#pragma once #include <atomic> #include <cstddef> #include <exception> #ifdef _OPENMP #define INTRA_OP_PARALLEL #include <omp.h> #endif namespace at { #ifdef _OPENMP namespace internal { template <typename F> inline void invoke_parallel( int64_t begin, int64_t end, int64_t grain_size, const F& f) { std::atomic_flag err_flag = ATOMIC_FLAG_INIT; std::exception_ptr eptr; #pragma omp parallel { // choose number of tasks based on grain size and number of threads // can't use num_threads clause due to bugs in GOMP's thread pool (See // #32008) int64_t num_threads = omp_get_num_threads(); if (grain_size > 0) { num_threads = std::min(num_threads, divup((end - begin), grain_size)); } int64_t tid = omp_get_thread_num(); int64_t chunk_size = divup((end - begin), num_threads); int64_t begin_tid = begin + tid * chunk_size; if (begin_tid < end) { try { internal::ThreadIdGuard tid_guard(tid); f(begin_tid, std::min(end, chunk_size + begin_tid)); } catch (...) { if (!err_flag.test_and_set()) { eptr = std::current_exception(); } } } } if (eptr) { std::rethrow_exception(eptr); } } } // namespace internal #endif // _OPENMP } // namespace at
1,289
21.241379
76
h
null
pytorch-main/aten/src/ATen/PythonTorchFunctionTLS.h
#pragma once

#include <c10/core/SafePyObject.h>
#include <c10/macros/Macros.h>

namespace at {
namespace impl {

enum TorchFunctionDisabledState { ENABLED, SUBCLASSES_DISABLED, ALL_DISABLED };

struct TORCH_API PythonTorchFunctionTLS {
  static void set_disabled_state(TorchFunctionDisabledState disabled_state_);
  static TorchFunctionDisabledState get_disabled_state();

  static void push_onto_stack(std::shared_ptr<SafePyObject> mode);
  static const std::shared_ptr<SafePyObject> pop_stack();
  static const std::shared_ptr<SafePyObject>& get_stack_at(int64_t idx);
  static int64_t stack_len();

  static const PythonTorchFunctionTLS& get_state();
  static void set_state(const PythonTorchFunctionTLS& state);

 private:
  // The mode TLS is split into
  //   - disabled_state, which says which parts of torch function are disabled
  //   - stack_, which is a vector of modes representing the stack of user
  //   defined modes
  TorchFunctionDisabledState disabled_state_ =
      TorchFunctionDisabledState::ENABLED;
  std::vector<std::shared_ptr<c10::SafePyObject>> stack_;
};

TORCH_API bool torch_function_mode_enabled();

} // namespace impl
} // namespace at
1,171
30.675676
79
h
null
pytorch-main/aten/src/ATen/SavedTensorHooks.h
#pragma once #include <c10/macros/Export.h> #include <c10/util/Optional.h> #include <c10/util/python_stub.h> #include <stack> #include <string> #include <utility> namespace at { namespace impl { struct TORCH_API SavedTensorDefaultHooksTLS { // PyObject is defined in c10/util/python_stub.h std::stack<std::pair<PyObject*, PyObject*>> stack; // See NOTE: [Disabling SavedTensorDefaultHooks] for context // NOTE: [disabled_error_message invariant] // disabled_error_message is nullopt IFF Saved Tensor hooks is enabled // We did this for efficiency (so we didn't have to keep a separate bool // around) c10::optional<std::string> disabled_error_message; }; } // namespace impl struct TORCH_API SavedTensorDefaultHooks { static void push_hooks(PyObject* pack_hook, PyObject* unpack_hook); static void pop_hooks(); static std::pair<PyObject*, PyObject*> get_hooks(); static void lazy_initialize(); static std::stack<std::pair<PyObject*, PyObject*>> get_stack(); static void set_stack(std::stack<std::pair<PyObject*, PyObject*>>); static const impl::SavedTensorDefaultHooksTLS& get_tls_state(); static void set_tls_state(const impl::SavedTensorDefaultHooksTLS& tls); // NOTE: [Disabling SavedTensorDefaultHooks] // A developer of a PyTorch feature may choose to disable SavedTensorDefault // hooks, especially if their feature does not work with it. If they are // disabled, then the following will raise an error: // - Attempting to push_hooks // - calling disable(message) with a non-zero stack (from get_stack) size static void disable(const std::string& error_message); static void enable(); static bool is_enabled(); static const c10::optional<std::string>& get_disabled_error_message(); }; } // namespace at
1,772
32.45283
78
h
null
pytorch-main/aten/src/ATen/ScalarOps.h
#pragma once

#include <ATen/Tensor.h>
#include <c10/core/Scalar.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/scalar_tensor.h>
#endif

namespace at {
namespace detail {
// When filling a number into a 1-element CPU tensor, we want to skip
// everything but manipulate data ptr directly.
// Ideally this fast pass should be implemented in TensorIterator,
// but we also want to skip compute_types which is not avoidable
// in TensorIterator for now.
Tensor& scalar_fill(Tensor& self, const Scalar& value);
TORCH_API Tensor scalar_tensor_static(
    const Scalar& s,
    c10::optional<ScalarType> dtype_opt,
    c10::optional<Device> device_opt);
} // namespace detail
} // namespace at

// This is in the c10 namespace because we use ADL to find the functions in it.
namespace c10 {

// FIXME: this should be (and was) Scalar::toTensor, but there is currently no
// way to implement this without going through Derived Types (which are not
// part of core).
inline at::Tensor scalar_to_tensor(
    const Scalar& s,
    const Device device = at::kCPU) {
  // This is the fast track we have for CPU scalar tensors.
  if (device == at::kCPU) {
    if (s.isFloatingPoint()) {
      return at::detail::scalar_tensor_static(s, at::kDouble, at::kCPU);
    } else if (s.isComplex()) {
      return at::detail::scalar_tensor_static(s, at::kComplexDouble, at::kCPU);
    } else if (s.isBoolean()) {
      return at::detail::scalar_tensor_static(s, at::kBool, at::kCPU);
    } else {
      AT_ASSERT(s.isIntegral(false));
      return at::detail::scalar_tensor_static(s, at::kLong, at::kCPU);
    }
  }
  if (s.isFloatingPoint()) {
    return at::scalar_tensor(s, at::device(device).dtype(at::kDouble));
  } else if (s.isBoolean()) {
    return at::scalar_tensor(s, at::device(device).dtype(at::kBool));
  } else if (s.isComplex()) {
    return at::scalar_tensor(s, at::device(device).dtype(at::kComplexDouble));
  } else {
    AT_ASSERT(s.isIntegral(false));
    return at::scalar_tensor(s, at::device(device).dtype(at::kLong));
  }
}

} // namespace c10

namespace at {
namespace native {

inline Tensor wrapped_scalar_tensor(
    const Scalar& scalar,
    const Device device = at::kCPU) {
  auto tensor = scalar_to_tensor(scalar, device);
  tensor.unsafeGetTensorImpl()->set_wrapped_number(true);
  return tensor;
}

} // namespace native
} // namespace at
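A usage sketch (assuming a libtorch build) showing the promotion rules baked into the dispatch above: C++ floating-point scalars land in kDouble, integral ones in kLong:

#include <ATen/ScalarOps.h>

at::Tensor demo() {
  // Fast CPU path through scalar_tensor_static.
  at::Tensor d = c10::scalar_to_tensor(at::Scalar(3.5)); // 0-dim, kDouble
  at::Tensor i = c10::scalar_to_tensor(at::Scalar(7));   // 0-dim, kLong
  // Marked as a "wrapped number" so type promotion treats it like a scalar.
  at::Tensor w = at::native::wrapped_scalar_tensor(at::Scalar(2));
  return d.add(i).add(w);
}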
2,388
30.434211
80
h
null
pytorch-main/aten/src/ATen/SparseCsrTensorImpl.h
#pragma once

#include <ATen/Tensor.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/Exception.h>

namespace at {

// Struct implementing a sparse CSR tensor. It uses three 1-D tensors for
// denoting the data: `crow_indices_`, `col_indices_` and `values_`.
// The `crow_indices_` tensor is an integer tensor of shape `(size(0) + 1)`
// that represents the compressed row indices of the CSR tensor. The
// `col_indices_` tensor is an integer tensor of shape `(nnz())`
// that explicitly stores the column indices of each value of the sparse
// tensor. The `values_` tensor can be of any pytorch-supported data type
// and has shape `(nnz())`.
//
// Since the main advantage of the CSR format over the COO format is speed of
// computation, care must be taken to facilitate smooth interfacing of
// these data structures with optimized libraries such as MKL and MAGMA.
// Since the MKL interface for pytorch currently uses indexing with int32
// type, it is important to make sure that the `crow_indices` and
// `col_indices` are of type int32 when calling MKL routines such as SPMM
// or SPMV.
//
// If not calling MKL, it should be alright to use 64 bit integer tensors
// for indexing.
struct TORCH_API SparseCsrTensorImpl : public TensorImpl {
  Tensor crow_indices_;
  Tensor col_indices_;
  Tensor values_;
  Layout layout_;

 public:
  explicit SparseCsrTensorImpl(
      at::DispatchKeySet,
      at::Device device,
      Layout layout,
      const caffe2::TypeMeta);

  void resize_(int64_t nnz, IntArrayRef size);
  void resize_and_clear_(
      int64_t sparse_dim,
      int64_t dense_dim,
      IntArrayRef size);
  void resize_as_sparse_compressed_tensor_(const Tensor& src);
  void set_member_tensors(
      const Tensor& crow_indices,
      const Tensor& col_indices,
      const Tensor& values,
      IntArrayRef size);

  const Tensor& compressed_indices() const {
    return crow_indices_;
  }
  const Tensor& plain_indices() const {
    return col_indices_;
  }
  const Tensor& values() const {
    return values_;
  }
  int64_t nnz() {
    return col_indices_.size(-1);
  }

  inline int64_t batch_dim() const noexcept {
    return crow_indices_.dim() - 1;
  }

  inline int64_t sparse_dim() const noexcept {
    return 2;
  }

  inline int64_t dense_dim() const noexcept {
    return values_.dim() - batch_dim() - block_dim() - 1;
  }

 private:
  inline int64_t block_dim() const noexcept {
    return (layout_ == kSparseBsr || layout_ == kSparseBsc ? 2 : 0);
  }

 protected:
  IntArrayRef strides_custom() const override;
  SymIntArrayRef sym_strides_custom() const override;
  bool is_contiguous_custom(MemoryFormat) const override;

 public:
  void set_size(int64_t dim, int64_t new_size) override;
  void set_stride(int64_t dim, int64_t new_stride) override;
  void set_storage_offset(int64_t storage_offset) override;
  Layout layout_impl() const override {
    return layout_;
  }
  void set_layout(Layout layout) {
    switch (layout) {
      case kSparseCsr:
      case kSparseCsc:
      case kSparseBsr:
      case kSparseBsc:
        layout_ = layout;
        break;
      default:
        TORCH_CHECK(false, "unsupported layout ", layout);
    }
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
*/ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) const override { auto impl = c10::make_intrusive<SparseCsrTensorImpl>( key_set(), device(), layout_impl(), dtype()); copy_tensor_metadata( /*src_impl=*/this, /*dest_impl=*/impl.get(), /*version_counter=*/version_counter, /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); impl->refresh_numel(); return impl; } /** * Return a TensorImpl that is a shallow-copy of this TensorImpl. * * For usage of `version_counter` and `allow_tensor_metadata_change`, * see NOTE [ TensorImpl Shallow-Copying ]. */ c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach( c10::VariableVersion&& version_counter, bool allow_tensor_metadata_change) const override { auto impl = c10::make_intrusive<SparseCsrTensorImpl>( key_set(), device(), layout_impl(), dtype()); copy_tensor_metadata( /*src_impl=*/this, /*dest_impl=*/impl.get(), /*version_counter=*/std::move(version_counter), /*allow_tensor_metadata_change=*/allow_tensor_metadata_change); impl->refresh_numel(); return impl; } private: explicit SparseCsrTensorImpl( at::DispatchKeySet key_set, const caffe2::TypeMeta data_type, at::Tensor crow_indices, at::Tensor col_indices, at::Tensor values, at::Layout layout); const char* tensorimpl_type_name() const override; /** * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer / * storage_offset) from one TensorImpl to another TensorImpl. * * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE * [ TensorImpl Shallow-Copying ]. */ static void copy_tensor_metadata( const SparseCsrTensorImpl* src_sparse_impl, SparseCsrTensorImpl* dest_sparse_impl, const c10::VariableVersion& version_counter, bool allow_tensor_metadata_change) { TensorImpl::copy_tensor_metadata( src_sparse_impl, dest_sparse_impl, version_counter, allow_tensor_metadata_change); // Sparse-specific fields dest_sparse_impl->crow_indices_ = src_sparse_impl->compressed_indices(); dest_sparse_impl->col_indices_ = src_sparse_impl->plain_indices(); dest_sparse_impl->values_ = src_sparse_impl->values(); dest_sparse_impl->layout_ = src_sparse_impl->layout_impl(); } }; } // namespace at
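As a concrete instance of the layout described at the top of this file: the 3x3 matrix [[1, 0, 2], [0, 0, 3], [4, 5, 6]] has nnz() == 6 and decomposes into the three member tensors as follows (plain arrays for illustration):

#include <cstdint>
#include <vector>

// Dense 3x3 matrix:
//   [[1, 0, 2],
//    [0, 0, 3],
//    [4, 5, 6]]
// Row i's values live in values[crow_indices[i] : crow_indices[i + 1]].
std::vector<int64_t> crow_indices{0, 2, 3, 6};      // shape (size(0) + 1)
std::vector<int64_t> col_indices{0, 2, 2, 0, 1, 2}; // shape (nnz())
std::vector<float> values{1, 2, 3, 4, 5, 6};        // shape (nnz())
// nnz() == col_indices.size() == crow_indices.back() == 6.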
5,927
31.393443
80
h
null
pytorch-main/aten/src/ATen/SparseCsrTensorUtils.h
#pragma once #include <ATen/SparseCsrTensorImpl.h> #include <ATen/SparseTensorImpl.h> #include <ATen/core/Tensor.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #include <ATen/Operators.h> #else #include <ATen/ops/resize_as_sparse_native.h> #endif #define AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS(LAYOUT, NAME, ...) \ [&] { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsr: \ case kSparseCsc: \ case kSparseBsr: \ case kSparseBsc: \ return __VA_ARGS__(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse compressed tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( \ LAYOUT, NAME, ROW_DIM_ACTION, COLUMN_DIM_ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsr: \ case kSparseBsr: \ return (ROW_DIM_ACTION)(); \ case kSparseCsc: \ case kSparseBsc: \ return (COLUMN_DIM_ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse compressed tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_PLAIN_SPARSE_COMPRESSED_LAYOUTS( \ LAYOUT, NAME, NO_BLOCK_ACTION, BLOCK_ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsr: \ case kSparseCsc: \ return (NO_BLOCK_ACTION)(); \ case kSparseBsr: \ case kSparseBsc: \ return (BLOCK_ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse compressed tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_SPARSE_ROW_COMPRESSED_LAYOUTS( \ LAYOUT, NAME, ROW_DIM_ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsr: \ case kSparseBsr: \ return (ROW_DIM_ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse row compressed tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_SPARSE_COL_COMPRESSED_LAYOUTS( \ LAYOUT, NAME, COL_DIM_ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsc: \ case kSparseBsc: \ return (COL_DIM_ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse column compressed tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_SPARSE_COMPRESSED_NONBLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseCsr: \ case kSparseCsc: \ return (ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse compressed (non-block) tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_SPARSE_COMPRESSED_BLOCK_LAYOUTS(LAYOUT, NAME, ACTION) \ [&]() { \ const auto& the_layout = LAYOUT; \ switch (the_layout) { \ case kSparseBsr: \ case kSparseBsc: \ return (ACTION)(); \ default: \ AT_ERROR( \ NAME, \ " expected sparse compressed block tensor layout but got ", \ the_layout); \ } \ }() #define AT_DISPATCH_SPARSE_VALUE_TYPES(TYPE, NAME, ...) 
\ AT_DISPATCH_SWITCH( \ TYPE, \ NAME, \ AT_DISPATCH_CASE_ALL_TYPES_AND_COMPLEX_AND4( \ kComplexHalf, kHalf, kBool, kBFloat16, __VA_ARGS__)) namespace at { namespace sparse_csr { using SparseCsrTensor = Tensor; inline bool is_sparse_compressed(const Layout& layout) { switch (layout) { case kSparseCsr: case kSparseCsc: case kSparseBsr: case kSparseBsc: return true; default:; } return false; } inline bool is_sparse_compressed(const Tensor& self) { return is_sparse_compressed(self.layout()); } inline SparseCsrTensorImpl* get_sparse_csr_impl(const SparseCsrTensor& self) { AT_DISPATCH_ALL_SPARSE_COMPRESSED_LAYOUTS( self.layout(), "get_sparse_csr_impl", [&] {}); return static_cast<SparseCsrTensorImpl*>(self.unsafeGetTensorImpl()); } inline std::string layoutToString( Layout layout, bool upper = false, bool lower = false) { switch (layout) { case kSparseCsr: return (upper ? "CSR" : (lower ? "csr" : "Csr")); case kSparseCsc: return (upper ? "CSC" : (lower ? "csc" : "Csc")); case kSparseBsr: return (upper ? "BSR" : (lower ? "bsr" : "Bsr")); case kSparseBsc: return (upper ? "BSC" : (lower ? "bsc" : "Bsc")); default: TORCH_CHECK(false, "Not a sparse compressed layout:", layout); return ""; } } inline bool isCompressedRow(Layout layout) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( layout, "isCompressedRow", [&] { return true; }, [&] { return false; }); } inline bool isCompressedColumn(Layout layout) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( layout, "isCompressedColumn", [&] { return false; }, [&] { return true; }); } inline std::string compressedIndicesName(Layout layout) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( layout, "compressedIndicesName", [&] { return "crow_indices"; }, [&] { return "ccol_indices"; }); } inline std::string plainIndicesName(Layout layout) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( layout, "plainIndicesName", [&] { return "col_indices"; }, [&] { return "row_indices"; }); } inline std::string compressedDimName(Layout layout) { switch (layout) { case kSparseCsr: return "row"; case kSparseCsc: return "column"; case kSparseBsr: return "row block"; case kSparseBsc: return "column block"; default: TORCH_CHECK(false, "Not a sparse compressed layout:", layout); return ""; } } inline std::string plainDimName(Layout layout) { switch (layout) { case kSparseCsr: return "column"; case kSparseCsc: return "row"; case kSparseBsr: return "column block"; case kSparseBsc: return "row block"; default: TORCH_CHECK(false, "Not a sparse compressed layout:", layout); return ""; } } inline int rowDimension(Layout layout, IntArrayRef size) { return size.size() - (isCompressedRow(layout) ? 2 : 1); } inline int columnDimension(Layout layout, IntArrayRef size) { return size.size() - (isCompressedColumn(layout) ? 2 : 1); } inline int compressedDimension( Layout layout, IntArrayRef size, size_t dense_ndim = 0) { return size.size() - dense_ndim - (isCompressedRow(layout) ? 2 : 1); } inline int plainDimension( Layout layout, IntArrayRef size, size_t dense_ndim = 0) { return size.size() - dense_ndim - (isCompressedRow(layout) ? 
1 : 2); } inline int64_t numBatchDimensions(Tensor const& self) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( self.layout(), "numBatchDimensions", [&self] { return self.crow_indices().dim() - 1; }, [&self] { return self.ccol_indices().dim() - 1; }); } inline std::pair<Tensor, Tensor> getCompressedPlainIndices(Tensor const& self) { return AT_DISPATCH_ROW_SPARSE_COMPRESSED_LAYOUTS( self.layout(), "getCompressedPlainIndices", [&self] { return std::make_pair(self.crow_indices(), self.col_indices()); }, [&self] { return std::make_pair(self.ccol_indices(), self.row_indices()); }); } inline Layout flip_compressed_layout(Layout layout) { switch (layout) { case kSparseCsr: return kSparseCsc; case kSparseCsc: return kSparseCsr; case kSparseBsr: return kSparseBsc; case kSparseBsc: return kSparseBsr; default: TORCH_CHECK(false, "Not a sparse compressed layout:", layout); return kSparseCsr; } } inline DimVector getBlockSize(Tensor const& self) { int64_t n_batch = numBatchDimensions(self); return at::DimVector(self.values().sizes().slice(n_batch + 1, 2)); } inline at::OptionalArray<at::SymInt> getSymIntBlockSize(Tensor const& self) { if (self.layout() == at::kSparseBsr || self.layout() == at::kSparseBsc) { int64_t n_batch = numBatchDimensions(self); return self.values().sym_sizes().slice(n_batch + 1, 2).vec(); } else { return {}; } } template <typename binary_op_t, typename binary_op_out_t> inline bool only_sparse_compressed_binary_op_trivial_cases( const Tensor& self, const Tensor& other, const Scalar& alpha, Tensor& out, const binary_op_t& binary_op, const binary_op_out_t& binary_op_out) { // Only sparse compressed! Just like the name says :) TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(self)); TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(other)); TORCH_INTERNAL_ASSERT(at::sparse_csr::is_sparse_compressed(out)); // Bypass BLAS if there are matches in (self, other, out) if (self.is_same(out) && self.is_same(other)) { binary_op_out(self.values(), other.values(), alpha); return true; } if (self.is_same(other)) { Tensor compressed_indices, plain_indices; std::tie(compressed_indices, plain_indices) = at::sparse_csr::getCompressedPlainIndices(self); static_cast<SparseCsrTensorImpl*>(out.unsafeGetTensorImpl()) ->set_member_tensors( compressed_indices, plain_indices, binary_op(self.values(), other.values(), alpha), self.sizes()); return true; } return false; } inline bool only_sparse_compressed_add_trivial_cases( const Tensor& self, const Tensor& other, const Scalar& alpha, Tensor& out) { return only_sparse_compressed_binary_op_trivial_cases( self, other, alpha, out, [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) { return v1.add(v2, alpha); }, [](const Tensor& v1, const Tensor& v2, const Scalar& alpha) { return v1.add_(v2, alpha); }); } } // namespace sparse_csr } // namespace at
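The layout helpers above compose into one-liners; a usage sketch assuming a libtorch build, with the expected results read off the definitions:

#include <ATen/SparseCsrTensorUtils.h>
#include <cassert>

void demo() {
  using namespace at::sparse_csr;
  assert(isCompressedRow(at::kSparseBsr));     // BSR compresses the row dim
  assert(isCompressedColumn(at::kSparseCsc));
  assert(compressedIndicesName(at::kSparseCsc) == std::string("ccol_indices"));
  assert(flip_compressed_layout(at::kSparseCsr) == at::kSparseCsc);
  assert(layoutToString(at::kSparseBsc, /*upper=*/true) == std::string("BSC"));
}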
14,733
38.714286
80
h
null
pytorch-main/aten/src/ATen/StorageUtils.h
#pragma once #include <c10/core/Storage.h> #include <c10/core/StorageImpl.h> #include <c10/util/intrusive_ptr.h> namespace at { class TensorBase; // Here we define a series of utils to create/manipulate ATen backed // c10 storage implementations. /** * Create a new shared memory storage impl managed by file descriptor * * @param size size in bytes */ C10_EXPORT c10::intrusive_ptr<c10::StorageImpl> new_shm_fd_storage(size_t size); /** * Copy src to dst * Caller must guarantee the validness of the storage objects * during the entire copy process, esp. when it's async. * * This can probably live in c10 namespace later if needed, * but for now keep it in at to keep implementation simple. * * @param dst dst tensor * @param src src tensor * @param non_blocking (default false) whether this operation blocks caller */ C10_EXPORT void storage_copy( c10::Storage& dst, const c10::Storage& src, bool non_blocking = false); /** * In place change the storage to shm based. * * This is only applicable to CPU tensors not already shared. * Otherwise, it's a no op to mirror the THP tensor behavior: * https://pytorch.org/docs/stable/generated/torch.Tensor.share_memory_.html * * @param t a tensor */ C10_EXPORT void share_memory_(TensorBase& t); } // namespace at
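A sketch of how these utilities compose, assuming a libtorch build on a platform with fd-backed shared memory; the calling pattern is my reading of the comments above, not a documented recipe:

#include <ATen/ATen.h>
#include <ATen/StorageUtils.h>

void demo() {
  at::Tensor src = at::ones({4, 4});
  // fd-backed shared-memory storage sized to the source bytes.
  c10::Storage dst(at::new_shm_fd_storage(src.nbytes()));
  at::storage_copy(dst, src.storage(), /*non_blocking=*/false);
  // Or, in place: moves a not-yet-shared CPU tensor's storage to shm.
  at::share_memory_(src);
}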
1,308
25.18
80
h
null
pytorch-main/aten/src/ATen/TensorGeometry.h
#pragma once

#include <ATen/core/TensorBase.h>
#include <c10/core/WrapDimMinimal.h>

namespace at {

// Return if the tensor geometry represented by `sizes` and `strides` is
// contiguous. Although we cache is_contiguous in tensor now, this is still
// useful because it allows checking if a particular geometry is contiguous
// without explicitly constructing a tensor, e.g., when you want to choose a
// kernel strategy based on whether a subgeometry is contiguous.
TORCH_API bool geometry_is_contiguous(IntArrayRef sizes, IntArrayRef strides);

struct TORCH_API TensorGeometry {
  TensorGeometry() = default;

  explicit TensorGeometry(c10::SymIntArrayRef sizes)
      : sizes_(sizes.vec()),
        strides_(sizes.size()),
        has_symbolic_sizes_strides_(
            !c10::asIntArrayRefSlowOpt(sizes).has_value()) {
    int64_t dim = sizes.size();
    c10::SymInt expected_stride = 1;
    for (int64_t i = dim - 1; i >= 0; i--) {
      strides_[i] = expected_stride;
      expected_stride *= sizes_[i];
    }
    numel_ = expected_stride;
  }

  explicit TensorGeometry(const TensorBase& t)
      : sizes_(t.sym_sizes().vec()),
        strides_(t.sym_strides().vec()),
        storage_offset_(t.sym_storage_offset()),
        numel_(t.sym_numel()),
        has_symbolic_sizes_strides_(
            t.unsafeGetTensorImpl()->has_symbolic_sizes_strides()) {}

  // true if the tensor is contiguous
  bool is_contiguous() const;

  int64_t dim() const {
    return sizes_.size();
  }

  int64_t size(int64_t dim) const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return sizes_.at(static_cast<size_t>(dim)).as_int_unchecked();
  }
  c10::IntArrayRef sizes() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return c10::asIntArrayRefUnchecked(sizes_);
  }
  int64_t stride(int64_t dim) const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return strides_.at(static_cast<size_t>(dim)).as_int_unchecked();
  }
  c10::IntArrayRef strides() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return c10::asIntArrayRefUnchecked(strides_);
  }
  int64_t storage_offset() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return storage_offset_.as_int_unchecked();
  }
  int64_t numel() const {
    TORCH_INTERNAL_ASSERT(!has_symbolic_sizes_strides_);
    return numel_.as_int_unchecked();
  }

  c10::SymInt sym_size(int64_t dim) const {
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return sizes_.at(static_cast<size_t>(dim));
  }
  c10::SymIntArrayRef sym_sizes() const {
    return sizes_;
  }
  c10::SymInt sym_stride(int64_t dim) const {
    dim = c10::maybe_wrap_dim(dim, this->dim());
    return strides_.at(static_cast<size_t>(dim));
  }
  c10::SymIntArrayRef sym_strides() const {
    return strides_;
  }
  c10::SymInt sym_storage_offset() const {
    return storage_offset_;
  }
  c10::SymInt sym_numel() const {
    return numel_;
  }

  TensorGeometry transpose(int64_t dim0, int64_t dim1) {
    TensorGeometry r = *this; // copy
    TORCH_CHECK(
        dim0 < dim(),
        "transpose: dim0=",
        dim0,
        " out of range (dim=",
        dim(),
        ")")
    TORCH_CHECK(
        dim1 < dim(),
        "transpose: dim1=",
        dim1,
        " out of range (dim=",
        dim(),
        ")")
    std::swap(r.sizes_[dim0], r.sizes_[dim1]);
    std::swap(r.strides_[dim0], r.strides_[dim1]);
    return r;
  }

  std::vector<c10::SymInt>& mutable_sizes() {
    return sizes_;
  }
  std::vector<c10::SymInt>& mutable_strides() {
    return strides_;
  }
  c10::SymInt& mutable_storage_offset() {
    return storage_offset_;
  }
  void recompute() {
    // recalculate numel after a change
    c10::SymInt numel = 1;
    for (const auto& i : sizes_) {
      numel = numel * i;
    }
    numel_ = std::move(numel);
    has_symbolic_sizes_strides_ =
        !c10::asIntArrayRefSlowOpt(sizes_).has_value();
  }
private: std::vector<c10::SymInt> sizes_; std::vector<c10::SymInt> strides_; c10::SymInt storage_offset_; c10::SymInt numel_; bool has_symbolic_sizes_strides_{false}; }; } // namespace at
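Following the first constructor above (a sketch assuming a libtorch build): strides fill right-to-left as running products of the sizes, so a {2, 3, 4} geometry comes out contiguous with numel 24:

#include <ATen/TensorGeometry.h>
#include <cassert>
#include <vector>

void demo() {
  std::vector<c10::SymInt> sizes{2, 3, 4};
  at::TensorGeometry g{c10::SymIntArrayRef(sizes)};
  // Right-to-left running product: strides {12, 4, 1}, numel 24.
  assert(g.stride(2) == 1 && g.stride(1) == 4 && g.stride(0) == 12);
  assert(g.numel() == 2 * 3 * 4);
  assert(g.is_contiguous());
}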
4,229
28.172414
80
h
null
pytorch-main/aten/src/ATen/TensorIndexing.h
#pragma once #include <ATen/ExpandUtils.h> #include <ATen/ScalarOps.h> #include <ATen/core/Tensor.h> #include <ATen/core/TensorBody.h> #include <c10/core/SymInt.h> #include <c10/util/Optional.h> #include <c10/util/irange.h> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/alias.h> #include <ATen/ops/empty.h> #include <ATen/ops/scalar_tensor.h> #include <ATen/ops/zeros.h> #endif #include <ATen/core/List.h> #include <utility> namespace at { namespace indexing { const int64_t INDEX_MIN = c10::SymInt::min_representable_int(); const int64_t INDEX_MAX = -(INDEX_MIN + 1); enum class TensorIndexType { None, Ellipsis, Integer, Boolean, Slice, Tensor }; constexpr c10::nullopt_t None = c10::nullopt; struct TORCH_API EllipsisIndexType final { EllipsisIndexType() = default; }; TORCH_API extern const EllipsisIndexType Ellipsis; struct TORCH_API Slice final { public: Slice( c10::optional<c10::SymInt> start_index = c10::nullopt, c10::optional<c10::SymInt> stop_index = c10::nullopt, c10::optional<c10::SymInt> step_index = c10::nullopt) { if (!step_index.has_value()) { step_ = c10::SymInt(1); } else { step_ = std::move(step_index).value(); } TORCH_CHECK_VALUE(step_ != 0, "slice step cannot be zero"); if (!start_index.has_value()) { start_ = c10::SymInt(step_ < 0 ? INDEX_MAX : 0); } else { start_ = std::move(start_index).value(); } if (!stop_index.has_value()) { stop_ = c10::SymInt(step_ < 0 ? INDEX_MIN : INDEX_MAX); } else { stop_ = std::move(stop_index).value(); } } inline c10::SymInt start() const { return start_; } inline c10::SymInt stop() const { return stop_; } inline c10::SymInt step() const { return step_; } private: c10::SymInt start_; c10::SymInt stop_; c10::SymInt step_; }; TORCH_API std::ostream& operator<<(std::ostream& stream, const Slice& slice); // `at::indexing::TensorIndex` is used for converting C++ tensor indices such as // `{None, "...", Ellipsis, 0, true, Slice(1, None, 2), torch::tensor({1, 2})}` // into its equivalent `std::vector<TensorIndex>`, so that further tensor // indexing operations can be performed using the supplied indices. // // There is one-to-one correspondence between Python and C++ tensor index types: // Python | C++ // ----------------------------------------------------- // `None` | `at::indexing::None` // `Ellipsis` | `at::indexing::Ellipsis` // `...` | `"..."` // `123` | `123` // `True` / `False` | `true` / `false` // `:` | `Slice()` / `Slice(None, None)` // `::` | `Slice()` / `Slice(None, None, None)` // `1:` | `Slice(1, None)` // `1::` | `Slice(1, None, None)` // `:3` | `Slice(None, 3)` // `:3:` | `Slice(None, 3, None)` // `::2` | `Slice(None, None, 2)` // `1:3` | `Slice(1, 3)` // `1::2` | `Slice(1, None, 2)` // `:3:2` | `Slice(None, 3, 2)` // `1:3:2` | `Slice(1, 3, 2)` // `torch.tensor([1, 2])`) | `torch::tensor({1, 2})` struct TORCH_API TensorIndex final { // Case 1: `at::indexing::None` TensorIndex(c10::nullopt_t) : type_(TensorIndexType::None) {} // Case 2: "..." 
/ `at::indexing::Ellipsis` TensorIndex(at::indexing::EllipsisIndexType) : type_(TensorIndexType::Ellipsis) {} TensorIndex(const char* str) : TensorIndex(at::indexing::Ellipsis) { TORCH_CHECK_VALUE( strcmp(str, "...") == 0, "Expected \"...\" to represent an ellipsis index, but got \"", str, "\""); } // Case 3: Integer value TensorIndex(int64_t integer) : integer_(integer), type_(TensorIndexType::Integer) {} TensorIndex(int integer) : TensorIndex((int64_t)integer) {} // Case 4: Boolean value template < class T, class = typename std::enable_if<std::is_same<bool, T>::value>::type> TensorIndex(T boolean) : boolean_(boolean), type_(TensorIndexType::Boolean) {} // Case 5: Slice represented in `at::indexing::Slice` form TensorIndex(Slice slice) : slice_(std::move(slice)), type_(TensorIndexType::Slice) {} // Case 6: Tensor value TensorIndex(Tensor tensor) : tensor_(std::move(tensor)), type_(TensorIndexType::Tensor) {} inline bool is_none() const { return type_ == TensorIndexType::None; } inline bool is_ellipsis() const { return type_ == TensorIndexType::Ellipsis; } inline bool is_integer() const { return type_ == TensorIndexType::Integer; } inline int64_t integer() const { return integer_; } inline bool is_boolean() const { return type_ == TensorIndexType::Boolean; } inline bool boolean() const { return boolean_; } inline bool is_slice() const { return type_ == TensorIndexType::Slice; } inline const Slice& slice() const { return slice_; } inline bool is_tensor() const { return type_ == TensorIndexType::Tensor; } inline const Tensor& tensor() const { return tensor_; } private: int64_t integer_ = 0; bool boolean_ = false; Slice slice_; Tensor tensor_; TensorIndexType type_; }; TORCH_API std::ostream& operator<<( std::ostream& stream, const TensorIndex& tensor_index); TORCH_API std::ostream& operator<<( std::ostream& stream, const std::vector<TensorIndex>& tensor_indices); namespace impl { static inline Tensor applySlice( const Tensor& self, int64_t dim, c10::SymInt start, c10::SymInt stop, c10::SymInt step, bool disable_slice_optimization, const at::Device& self_device, const c10::optional<SymIntArrayRef>& self_sizes) { // TODO: implement negative step TORCH_CHECK_VALUE(step > 0, "step must be greater than zero"); // See NOTE [nested tensor size for indexing] if (self_sizes.has_value()) { // Skip this optimization if we are tracing, as the trace may be polymorphic // over the shape of the `self` tensor, and we still want to record // the slice. SymInt length = (self_device == at::kCPU || self_device == at::kCUDA) ? (*self_sizes)[dim] : self.sym_size(dim); if (!disable_slice_optimization && start == 0 && length == stop && step == 1) { return self; } } return self.slice_symint(dim, start, stop, std::move(step)); } static inline Tensor applySelect( const Tensor& self, int64_t dim, int64_t index, int64_t real_dim, const at::Device& /*self_device*/, const c10::optional<SymIntArrayRef>& self_sizes) { // See NOTE [nested tensor size for indexing] if (self_sizes.has_value()) { TORCH_CHECK_INDEX( !(index == 0 && dim == 0 && self_sizes->empty()), "invalid index of a 0-dim tensor. ", "Use `tensor.item()` in Python or `tensor.item<T>()` in C++ to convert a 0-dim tensor to a number"); auto size = (*self_sizes)[dim]; TORCH_CHECK_INDEX( size >= -index && size > index, "index ", index, " is out of bounds for dimension ", real_dim, " with size ", size); } // if the index is negative, do not normalize it because that would fix the // index on the current tensor size in the tracer. 
  // aten::select also works on
  // negative indices
  return self.select(dim, index);
}

static inline Tensor boolToIndexingTensorCPUOrCUDA(
    const Tensor& self,
    bool value) {
  // booleans add a dimension of size 1. true indexes this dimension as if 0:,
  // false as empty.
  if (value) {
    return at::empty({1}, {}, self.options().dtype(kLong)).fill_(0.);
  } else {
    return at::empty({0}, {}, self.options().dtype(kLong));
  }
}

static inline Tensor boolToIndexingTensorNonNativeDeviceType(
    const Tensor& self,
    bool value) {
  // booleans add a dimension of size 1. true indexes this dimension as if 0:,
  // false as empty.
  if (value) {
    return at::zeros({1}, {}, self.options().dtype(kLong));
  } else {
    return at::empty({0}, {}, self.options().dtype(kLong));
  }
}

static inline Tensor boolToIndexingTensor(
    const Tensor& self,
    bool value,
    const at::Device& self_device) {
  if (self_device == at::kCPU || self_device == at::kCUDA) {
    return boolToIndexingTensorCPUOrCUDA(self, value);
  } else {
    return boolToIndexingTensorNonNativeDeviceType(self, value);
  }
}

static inline Tensor scalarToTensorNonNativeDeviceType(
    const Scalar& v,
    const TensorOptions& options) {
  return at::scalar_tensor(v, options);
}

static inline void recordTensorIndex(
    const Tensor& tensor,
    std::vector<Tensor>& outIndices,
    int64_t* dim_ptr) {
  // TODO: check scalarType
  outIndices.resize(*dim_ptr + 1);
  outIndices[*dim_ptr] = tensor;
  (*dim_ptr)++;
}

static inline c10::List<c10::optional<Tensor>> typeConvertIndices(
    const Tensor& /*self*/,
    std::vector<Tensor>&& indices) {
  c10::List<c10::optional<Tensor>> converted_inds;
  converted_inds.reserve(indices.size());
  for (const auto& i : indices) {
    converted_inds.push_back(std::move(i));
  }
  return converted_inds;
}

// NOTE: Why do we mirror instead of replace the `count_specified_dimensions`
// function in torch/csrc/autograd/python_variable_indexing.cpp? It's because
// `count_specified_dimensions` is on the hot path of Python tensor multi-dim
// indexing (i.e. it's called by `applySlicing` which is called by
// `THPVariable_getitem` / `THPVariable_setitem` when handling indexing of more
// than one dimension). If we were to merge the Python/C++
// `count_specified_dimensions` function, on the Python side we would have to
// construct a `std::vector` container to be consumed by the C++
// `count_specified_dimensions` function, which adds 100s of nanoseconds
// overhead and is undesirable.
static inline int64_t count_specified_dimensions(
    const ArrayRef<TensorIndex>& indices) {
  // Count the number of indexed dimensions (everything but ellipsis and None)
  int64_t count = 0;
  for (auto& obj : indices) {
    if (obj.is_tensor()) {
      auto& tensor = obj.tensor();
      if (tensor.scalar_type() == kByte || tensor.scalar_type() == kBool) {
        count += tensor.dim();
      } else {
        count++;
      }
    } else if (!obj.is_none() && !obj.is_ellipsis() && !obj.is_boolean()) {
      count++;
    }
  }
  return count;
}
} // namespace impl

// NOTE: Many functions below are only for consumption from Python indexing
// implementation, they include:
//
// - `Tensor scalarToTensor(...)`
// - `IntArrayRef slicePrefix1sSize(...)`
// - `void copy_to(...)`
// - `Tensor handleDimInMultiDimIndexing(...)`
// - `Tensor dispatch_index(...)`
// - `Tensor dispatch_index_put_(...)`
// - `Tensor get_item(...)`
// - `void set_item(...)`
//
// The rest of the functions are in `at::indexing::impl` namespace, signifying
// that they shouldn't be used from Python indexing implementation.
static inline Tensor scalarToTensor(
    const Scalar& v,
    const TensorOptions& options,
    const at::Device& self_device) {
  if (self_device == at::kCPU) {
    return at::detail::scalar_tensor_static(
        v, options.dtype_opt()->toScalarType(), self_device);
  } else {
    return impl::scalarToTensorNonNativeDeviceType(v, options);
  }
}

// To match numpy semantics:
// As a special case for backwards compatibility,
// strip away unit dimensions from the left of 'src'
static inline SymIntArrayRef slicePrefix1sSize(const SymIntArrayRef& sizes) {
  size_t first_non1_src = sizes.size();
  for (const auto i : c10::irange(sizes.size())) {
    // Unbacked SymInt has different behavior, but this is sound because
    // failing to slice will only ever cause an error, not divergent
    // behavior
    if (!sizes[i].has_hint() || sizes[i] != 1) {
      first_non1_src = i;
      break;
    }
  }
  return sizes.slice(first_non1_src);
}

static inline void copy_to(const Tensor& dst, const Tensor& src) {
  if (dst.sym_sizes().equals(src.sym_sizes())) {
    // A shortcut to avoid generating hard-coded constant sizes during tracing.
    // This is not a perfect solution: when src & dst have different shapes,
    // constants will still appear. Users can workaround that case by
    // dst[index..] = src.reshape(..)
    dst.copy_(src);
    return;
  } else if (src.dim() == 0 && src.device().type() == at::kCPU) {
    dst.fill_(src);
    return;
  }
  auto src_view = src.view_symint(slicePrefix1sSize(src.sym_sizes()));
  c10::MaybeOwned<Tensor> b_src = expand_inplace(dst, src_view, "setitem");
  dst.copy_(*b_src);
}

// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
static inline Tensor handleDimInMultiDimIndexing(
    const Tensor& prev_dim_result,
    const Tensor& original_tensor,
    const TensorIndex& index,
    int64_t* dim_ptr,
    int64_t* specified_dims_ptr,
    int64_t real_dim,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& original_tensor_device,
    const c10::optional<SymIntArrayRef>& prev_dim_result_sizes) {
  if (index.is_integer()) {
    return impl::applySelect(
        prev_dim_result,
        *dim_ptr,
        index.integer(),
        real_dim,
        original_tensor_device,
        prev_dim_result_sizes);
  } else if (index.is_slice()) {
    Tensor result = impl::applySlice(
        prev_dim_result,
        *dim_ptr,
        index.slice().start(),
        index.slice().stop(),
        index.slice().step(),
        /*disable_slice_optimization=*/disable_slice_optimization,
        original_tensor_device,
        prev_dim_result_sizes);
    (*dim_ptr)++;
    return result;
  } else if (index.is_ellipsis()) {
    (*dim_ptr) += original_tensor.dim() - (*specified_dims_ptr);
    return prev_dim_result;
  } else if (index.is_none()) {
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    (*dim_ptr)++;
    return result;
  } else if (index.is_boolean()) {
    Tensor result = prev_dim_result.unsqueeze(*dim_ptr);
    impl::recordTensorIndex(
        impl::boolToIndexingTensor(
            result, index.boolean(), original_tensor_device),
        outIndices,
        dim_ptr);
    return result;
  } else if (index.is_tensor()) {
    Tensor result = prev_dim_result;
    const Tensor& tensor = index.tensor();
    auto scalar_type = tensor.scalar_type();
    if (tensor.dim() == 0 &&
        at::isIntegralType(scalar_type, /*includeBool=*/true)) {
      if (scalar_type != at::kByte && scalar_type != at::kBool) {
        result = impl::applySelect(
            result,
            *dim_ptr,
            tensor.item<int64_t>(),
            real_dim,
            original_tensor_device,
            prev_dim_result_sizes);
      } else {
        result = result.unsqueeze(*dim_ptr);
        if (scalar_type == at::kBool) {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<bool>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        } else {
          impl::recordTensorIndex(
              impl::boolToIndexingTensor(
                  result, tensor.item<uint8_t>() != 0, original_tensor_device),
              outIndices,
              dim_ptr);
        }
      }
    } else {
      impl::recordTensorIndex(tensor, outIndices, dim_ptr);
    }
    return result;
  } else {
    TORCH_INTERNAL_ASSERT(false, "Invalid TensorIndex type");
  }
}

namespace impl {
// This mirrors `applySlicing` in
// torch/csrc/autograd/python_variable_indexing.cpp
static inline Tensor applySlicing(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    std::vector<Tensor>& outIndices,
    bool disable_slice_optimization,
    const at::Device& self_device,
    const c10::optional<SymIntArrayRef>& self_sizes) {
  int64_t dim = 0;
  int64_t specified_dims = impl::count_specified_dimensions(indices);

  // See NOTE [nested tensor size for indexing]
  if (self_sizes.has_value()) {
    TORCH_CHECK_INDEX(
        specified_dims <= (int64_t)self_sizes->size(),
        "too many indices for tensor of dimension ",
        (int)self_sizes->size());
  }

  Tensor result = self;
  for (const auto i : c10::irange(indices.size())) {
    auto& obj = indices[i];
    // See NOTE [nested tensor size for indexing]
    c10::optional<SymIntArrayRef> result_sizes = result.is_nested()
        ? c10::optional<SymIntArrayRef>(c10::nullopt)
        : c10::optional<SymIntArrayRef>(result.sym_sizes());
    result = handleDimInMultiDimIndexing(
        /*prev_dim_result=*/result,
        /*original_tensor=*/self,
        /*index=*/obj,
        /*dim=*/&dim,
        /*specified_dims=*/&specified_dims,
        /*real_dim=*/i,
        /*outIndices=*/outIndices,
        /*disable_slice_optimization=*/disable_slice_optimization,
        /*original_tensor_device=*/self_device,
        /*prev_dim_result_sizes=*/result_sizes);
  }
  return result;
}
} // namespace impl

static inline Tensor dispatch_index(
    const Tensor& self,
    std::vector<Tensor>&& indices) {
  return self.index(impl::typeConvertIndices(self, std::move(indices)));
}

static inline Tensor dispatch_index_put_(
    Tensor& self,
    std::vector<Tensor>&& indices,
    const Tensor& value) {
  return self.index_put_(
      impl::typeConvertIndices(self, std::move(indices)), value);
}

// NOTE [ Setting `disable_slice_optimization` when calling C++ tensor indexing
// functions from Python ]
//
// Question: When should we set `disable_slice_optimization` to `true` when
// calling C++ tensor indexing functions from Python indexing code?
//
// Answer: What "slice optimization" means: when we have a slicing expression
// like `x[0:5, 0]`, where the sliced tensor was of size 5 in dimension 0, we
// would skip dispatching the actual slice call as an optimization. However,
// here are the cases where we DON'T want this optimization:
//
// 1. When we are doing 1-D slicing (e.g. `tensor[:]`).
//    Reason: we always return a shallow copy for expressions such as
//    `tensor[:]` / `tensor[...]` / `tensor[:, :]`. (Note that for `tensor[:,
//    :]`, we return an alias of `tensor` by doing the following:
//    ```
//    Tensor sliced = impl::applySlicing(self, indices, tensorIndices,
//    disable_slice_optimization, self_device, self_sizes); if
//    (tensorIndices.empty()) {
//      if (sliced.is_same(self)) {
//        // ensure we return a shallow copy for things like x[...]
//        sliced = at::alias(sliced);
//      }
//      return sliced;
//    }
//    ```)
// 2. When we are doing JIT tracing.
//    Reason: JIT tracing needs the `self.slice(...)` call to properly trace
//    the slice operation.

// This mirrors `THPVariable_getitem` in
// torch/csrc/autograd/python_variable_indexing.cpp
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
static inline Tensor get_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();

  // NOTE [nested tensor size for indexing]
  // nested tensor does not have a size (yet) so for now we represent its size
  // as null; this may need to be changed after we reach a better solution for
  // nested tensor size
  c10::optional<SymIntArrayRef> self_sizes = self.is_nested()
      ? c10::optional<SymIntArrayRef>(c10::nullopt)
      : c10::optional<SymIntArrayRef>(self.sym_sizes());

  // handle simple types: integers, slices, none, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_integer()) {
      return impl::applySelect(
          self, 0, index.integer(), 0, self_device, self_sizes);
    } else if (index.is_slice()) {
      return impl::applySlice(
          self,
          0,
          index.slice().start(),
          index.slice().stop(),
          index.slice().step(),
          /*disable_slice_optimization=*/true,
          self_device,
          self_sizes);
    } else if (index.is_none()) {
      return self.unsqueeze(0);
    } else if (index.is_ellipsis()) {
      return at::alias(self);
    } else if (index.is_boolean()) {
      Tensor result = self.unsqueeze(0);
      return dispatch_index(
          result,
          std::vector<Tensor>{impl::boolToIndexingTensor(
              result, index.boolean(), self_device)});
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    if (sliced.is_same(self)) {
      // ensure we return a shallow copy for things like x[...]
      sliced = at::alias(sliced);
    }
    return sliced;
  }

  // indexing by tensors ("advanced" indexing)
  return dispatch_index(sliced, std::move(tensorIndices));
}

// This mirrors `THPVariable_setitem` in
// torch/csrc/autograd/python_variable_indexing.cpp for "the assigned value is
// a Tensor" case
// See NOTE [ Setting `disable_slice_optimization` when calling C++ tensor
// indexing functions from Python ]
static inline void set_item(
    const Tensor& self,
    const ArrayRef<TensorIndex>& indices,
    const Tensor& value,
    bool disable_slice_optimization = false) {
  at::Device self_device = self.device();
  SymIntArrayRef self_sizes = self.sym_sizes();

  // handle simple types: integers, slices, ellipsis, bool
  if (indices.size() == 1) {
    const TensorIndex& index = indices[0];
    if (index.is_boolean() && !index.boolean()) {
      // do nothing for false (technically we should check the size, but we
      // don't have real 0-sized shapes.)
      return;
    } else if (index.is_ellipsis()) {
      copy_to(self, value);
      return;
    } else if (index.is_none() || (index.is_boolean() && index.boolean())) {
      copy_to(self.unsqueeze(0), value);
      return;
    } else if (index.is_integer()) {
      copy_to(
          impl::applySelect(
              self, 0, index.integer(), 0, self_device, self_sizes),
          value);
      return;
    } else if (index.is_slice()) {
      copy_to(
          impl::applySlice(
              self,
              0,
              index.slice().start(),
              index.slice().stop(),
              index.slice().step(),
              /*disable_slice_optimization=*/disable_slice_optimization,
              self_device,
              self_sizes),
          value);
      return;
    }
  }

  std::vector<Tensor> tensorIndices;
  Tensor sliced = impl::applySlicing(
      self,
      indices,
      tensorIndices,
      disable_slice_optimization,
      self_device,
      self_sizes);
  if (tensorIndices.empty()) {
    copy_to(sliced, value);
    return;
  }

  SymIntArrayRef valueSizes = value.sym_sizes();
  SymIntArrayRef slicedValueSizes = slicePrefix1sSize(valueSizes);
  Tensor valuesSliced;
  if (!valueSizes.equals(slicedValueSizes)) {
    valuesSliced = value.view_symint(slicedValueSizes);
  } else {
    valuesSliced = value;
  }
  dispatch_index_put_(sliced, std::move(tensorIndices), valuesSliced);
  return;
}

} // namespace indexing
} // namespace at
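A minimal usage sketch (not part of the header above): the public entry points to this machinery are Tensor::index / Tensor::index_put_, which mirror Python's `x[...]` reads and writes. This assumes a standard libtorch build.

#include <torch/torch.h>
using namespace torch::indexing;

int main() {
  torch::Tensor x = torch::arange(12).reshape({3, 4});
  // x[0:2, -1] -> the get_item path: one slice index, one integer index
  torch::Tensor col = x.index({Slice(0, 2), -1});
  // x[x > 5] = 0 -> the set_item path: boolean "advanced" indexing
  x.index_put_({x > 5}, 0);
  return 0;
}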
23,337
31.013717
108
h
null
pytorch-main/aten/src/ATen/TensorIteratorInternal.h
#pragma once

#include <ATen/native/TensorIterator.h>
#include <c10/util/SmallBuffer.h>
#include <c10/util/irange.h>

namespace at {

struct DimCounter {
  DimCounter(IntArrayRef shape, Range range);

  void increment(const std::array<int64_t, 2>& step);
  bool is_done() const;
  std::array<int64_t, 2> max_2d_step() const;

  IntArrayRef shape;
  Range range;
  c10::SmallBuffer<int64_t, 4> values;
  int64_t offset;
};

namespace internal {

inline void get_data_ptrs(
    char** ptrs,
    ArrayRef<char*> base,
    IntArrayRef strides,
    IntArrayRef counter) {
  const int64_t ntensors = base.size();
  const int64_t ndim = counter.size();
  std::copy(base.begin(), base.end(), ptrs);
  for (const auto dim : c10::irange(ndim)) {
    int64_t value = counter[dim];
    for (const auto arg : c10::irange(ntensors)) {
      ptrs[arg] += value * strides[dim * ntensors + arg];
    }
  }
}

inline void serial_for_each(
    IntArrayRef shape,
    IntArrayRef strides,
    char** base_ptrs,
    size_t ntensors,
    typename TensorIteratorBase::loop2d_t loop,
    Range range) {
  const auto ndim = shape.size();
  TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
      strides.size() == ntensors * std::max(size_t{2}, ndim));

  if (ndim <= 1) {
    if (range.begin == 0) {
      loop(base_ptrs, strides.data(), range.size(), 1);
    } else {
      c10::SmallBuffer<char*, 4> ptrs(ntensors);
      get_data_ptrs(ptrs.data(), {base_ptrs, ntensors}, strides, {range.begin});
      loop(ptrs.data(), strides.data(), range.size(), 1);
    }
  } else {
    c10::SmallBuffer<char*, 4> ptrs(ntensors);
    auto counter = DimCounter(shape, range);
    while (!counter.is_done()) {
      get_data_ptrs(
          ptrs.data(), {base_ptrs, ntensors}, strides, counter.values);
      auto step = counter.max_2d_step();
      loop(ptrs.data(), strides.data(), step[0], step[1]);
      counter.increment(step);
    }
  }
}

} // namespace internal
} // namespace at
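An illustrative, self-contained sketch of the stride arithmetic in get_data_ptrs above (plain C++, no ATen): strides are laid out dim-major as strides[dim * ntensors + arg], so reaching logical position `counter` just accumulates counter[dim] * stride per operand. The concrete shapes and stride values here are made up for the demo.

#include <cstdint>
#include <iostream>

int main() {
  constexpr int64_t ntensors = 2, ndim = 2;
  // Two contiguous float tensors of shape [3, 4]: byte strides {16, 4}.
  const int64_t strides[ndim * ntensors] = {16, 16, 4, 4};
  const int64_t counter[ndim] = {2, 1}; // logical position [2][1]

  int64_t offsets[ntensors] = {0, 0};
  for (int64_t dim = 0; dim < ndim; ++dim) {
    for (int64_t arg = 0; arg < ntensors; ++arg) {
      offsets[arg] += counter[dim] * strides[dim * ntensors + arg];
    }
  }
  std::cout << offsets[0] << "\n"; // 2*16 + 1*4 = 36 bytes for each operand
  return 0;
}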
1,937
25.547945
80
h
null
pytorch-main/aten/src/ATen/TensorMeta.h
#pragma once

#include <ATen/DimVector.h>
#include <ATen/core/Dimname.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/strides.h>

C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wdeprecated-copy-dtor")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy-dtor")
#endif

namespace at {

class Tensor;

namespace impl {

// Use this to define the prototype for a meta function. There are two
// versions: one that takes one argument (just the operator name), and a FUNC2
// variant that takes two arguments (operator name and overload name).
//
// Example usage:
//
// TORCH_META_FUNC2(add, Tensor) (
//   const Tensor& self, const Tensor& other
// ) {
//   ... compute sizes and options ...
//   set_output(sizes, options);
// }
//
#define TORCH_META_FUNC(name) void structured_##name::meta
#define TORCH_META_FUNC2(name, overload) \
  void structured_##name##_##overload::meta

// These are versions of TORCH_META_FUNC(2) that include a precompute_out
// struct as a return value. They should be used when the kernel in question
// has precomputed values declared in native_functions.yaml and the
// corresponding implementation should return an instance of the
// aforementioned struct.
#define TORCH_PRECOMPUTE_META_FUNC(name) \
  structured_##name::meta_return_ty structured_##name::meta
#define TORCH_PRECOMPUTE_META_FUNC2(name, overload) \
  structured_##name##_##overload::meta_return_ty    \
  structured_##name##_##overload::meta

// Use this to create a precompute struct in a meta function.
#define TORCH_PRECOMPUTE_STRUCT(name) structured_##name::precompute_out<>
#define TORCH_PRECOMPUTE_STRUCT2(name, overload) \
  structured_##name##_##overload::precompute_out<>

// Use this to define the prototype for an implementation. This takes only
// one argument, which is the name of the dispatch key entry you're
// implementing.
//
// Example usage:
//
// TORCH_IMPL_FUNC(add_cpu) (
//   Tensor& result, const Tensor& self, const Tensor& other
// ) {
//   ... do the actual implementation ...
// }
//
#define TORCH_IMPL_FUNC(name) void structured_##name::impl

// Base class for all structured kernel classes. The set_output virtual
// method varies depending on whether or not the operator is
// functional/out/inplace, and could also be specialized for CPU/CUDA/etc.
// (although presently it isn't).
//
// A notable subclass of this interface is TensorIteratorBase.
struct TORCH_API MetaBase {
  virtual const Tensor& maybe_get_output(int64_t output_idx) = 0;

  // Note: [set_output_*]
  // See: https://github.com/pytorch/pytorch/issues/69813
  // Whenever defining the output properties in the META function of a
  // structured kernel (what was usually done with `set_output`), use one of
  // these 3 variants, instead. In order to decide which variant to use, check
  // the following decision tree:
  //
  // - Can the kernel you are going to implement support output tensors
  //   with arbitrary strides?
  //   |
  //   -- YES: `set_output_raw_strided`
  //   |
  //   -- NO: Should the output tensor strides be contiguous?
  //      |
  //      -- YES: `set_output_contiguous`
  //      |
  //      -- NO: `set_output_strided`
  //
  // Use this function whenever the kernel requires specific strides for the
  // output. If `strides` does not match the given output strides, proxy
  // outputs will be created and passed to the IMPL function.
  virtual void set_output_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides,
      TensorOptions options,
      DimnameList names = {}) {
    TORCH_INTERNAL_ASSERT(false, "set_output_strided not implemented.");
  }

  // Use this function whenever the kernel knows how to handle arbitrary
  // strided outputs. This function has the same behavior as the old
  // `set_output`: it will only re-stride if the given output was resized.
  virtual void set_output_raw_strided(
      int64_t output_idx,
      IntArrayRef sizes,
      IntArrayRef strides_hint,
      TensorOptions options,
      DimnameList names = {}) {
    TORCH_INTERNAL_ASSERT(false, "set_output_raw_strided not implemented.");
  }

  // Use this function if the kernel requires contiguous strides.
  // Alias for `set_output_strided`, but with contiguous strides.
  void set_output_contiguous(
      int64_t output_idx,
      IntArrayRef sizes,
      TensorOptions options,
      DimnameList names = {}) {
    auto strides = c10::contiguous_strides(sizes);
    set_output_strided(output_idx, sizes, strides, options, names);
  }

  // Returns a reference to an undefined tensor if there is no presupplied
  // output
  const Tensor& maybe_get_output() {
    return maybe_get_output(0);
  }

  virtual ~MetaBase() = default;
};

} // namespace impl

} // namespace at

C10_CLANG_DIAGNOSTIC_POP()
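A hedged sketch of how these macros fit together for a hypothetical structured op `my_relu` (not a real PyTorch op; the structured_my_relu class itself would be generated from native_functions.yaml, so this fragment only shows the authoring side and is not compilable standalone):

// META: shape inference only; output matches input, contiguous is fine.
TORCH_META_FUNC(my_relu)(const Tensor& self) {
  set_output_contiguous(0, self.sizes(), self.options());
}

// IMPL: the CPU kernel; `result` was allocated per the meta() call above.
TORCH_IMPL_FUNC(my_relu_cpu)(const Tensor& self, const Tensor& result) {
  // ... fill `result` elementwise ...
}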
4,824
33.464286
80
h
null
pytorch-main/aten/src/ATen/TensorNames.h
#pragma once

#include <ATen/WrapDimUtils.h>

namespace at {
namespace namedinference {

// TensorName and TensorNames are wrappers around Dimname and DimnameList
// that contain helper functions to make writing name inference rules easier.
//
// A TensorName represents a Dimname associated with some DimnameList (from a
// Tensor). This encapsulates all the information that is needed to check if
// names *match* and to *unify* names.
//
// Definition: Two names in two tensors *match* if they are equal, or if at
// least one of them is a wildcard that can be *refined* to the other name.
//
// Definition: unify(name, other) fails if the names do not match. Otherwise,
// it returns the most refined of name and other.
//
// Here is an example of checking if two names match.
// tensor: Tensor[A, None]
// other: Tensor[A]
//
// Let's say we wish to check if tensor.names[-1] matches other.names[-1].
// None (in tensor) cannot match A (in other) because if the None were refined
// to A, `tensor` would have duplicate names [A, A]. Therefore we need to check
// tensor.names [A, None] for the existence of A.
struct TORCH_API TensorName {
  explicit TensorName(ArrayRef<Dimname> origin, int origin_idx)
      : origin_(origin),
        name_(origin[maybe_wrap_dim(origin_idx, origin.size())]),
        origin_idx_(origin_idx) {}

  // op_name is only used for error reporting.
  const TensorName& unify(const TensorName& other, const char* op_name) const;
  Dimname toDimname() const;

 private:
  ArrayRef<Dimname> origin_;
  Dimname name_;
  int origin_idx_; // A named tensor can have at most 64 dims.

  TORCH_API friend std::ostream& operator<<(
      std::ostream& out,
      const TensorName& tensorname);
};

using TensorNameVec = SmallVector<TensorName, 10>;

struct TORCH_API TensorNames {
  explicit TensorNames(ArrayRef<Dimname> names);

  // Create TensorNames from names[start:end]. Each individual TensorName
  // stores `names`, NOT names[start:end], because the original tensor's
  // names are `names`.
  explicit TensorNames(ArrayRef<Dimname> names, int64_t start, int64_t end);

  // op_name is only used for error reporting.
  TensorNames& unifyFromRightInplace(
      const TensorNames& other,
      const char* op_name = "unify");
  void checkUnique(const char* op_name) const;

  void append(TensorName&& name);
  std::vector<Dimname> toDimnameVec() const;

 private:
  explicit TensorNames(TensorNameVec&& names) : names_(names) {}

  TensorNameVec names_;
};

} // namespace namedinference
} // namespace at
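An illustrative, standalone sketch of the *match*/*unify* rules described in the comment above, using std::optional<std::string> in place of Dimname (std::nullopt plays the role of the None wildcard). This is not the real API, only a model of the rule:

#include <optional>
#include <stdexcept>
#include <string>

using Name = std::optional<std::string>;

// unify fails if the names do not match; otherwise it returns the most
// refined of the two names (a wildcard refines to the concrete name).
Name unify(const Name& a, const Name& b) {
  if (!a) return b;       // None matches anything; refine to b
  if (!b) return a;       // symmetric case
  if (*a == *b) return a; // equal names match
  throw std::runtime_error("names do not match");
}

int main() {
  Name r1 = unify(Name{"A"}, std::nullopt); // -> "A"
  Name r2 = unify(Name{"A"}, Name{"A"});    // -> "A"
  // unify(Name{"A"}, Name{"B"}) would throw.
  (void)r1; (void)r2;
  return 0;
}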
2,540
32.434211
80
h
null
pytorch-main/aten/src/ATen/TensorOperators.h
#pragma once

#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty_like.h>
#endif

#include <stdexcept>
#include <string>

namespace at {

#define AT_FORALL_BINARY_OPS(_)                                             \
  _(+, x.add(y), y.add(x))                                                  \
  _(*, x.mul(y), y.mul(x))                                                  \
  _(-,                                                                      \
    x.sub(y),                                                               \
    ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).sub_(y))       \
  _(/,                                                                      \
    x.div(y),                                                               \
    ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).div_(y))       \
  _(%,                                                                      \
    x.remainder(y),                                                         \
    ::at::empty_like(y, at::MemoryFormat::Preserve).fill_(x).remainder_(y)) \
  _(&, x.bitwise_and(y), y.bitwise_and(x))                                  \
  _(|, x.bitwise_or(y), y.bitwise_or(x))                                    \
  _(^, x.bitwise_xor(y), y.bitwise_xor(x))                                  \
  _(<, x.lt(y), y.gt(x))                                                    \
  _(<=, x.le(y), y.ge(x))                                                   \
  _(>, x.gt(y), y.lt(x))                                                    \
  _(>=, x.ge(y), y.le(x))                                                   \
  _(==, x.eq(y), y.eq(x))                                                   \
  _(!=, x.ne(y), y.ne(x))

#define DEFINE_OPERATOR(op, body, reverse_scalar_body)                 \
  static inline Tensor operator op(const Tensor& x, const Tensor& y) { \
    return body;                                                       \
  }                                                                    \
  static inline Tensor operator op(const Tensor& x, const Scalar& y) { \
    return body;                                                       \
  }                                                                    \
  static inline Tensor operator op(const Scalar& x, const Tensor& y) { \
    return reverse_scalar_body;                                        \
  }

AT_FORALL_BINARY_OPS(DEFINE_OPERATOR)
#undef DEFINE_OPERATOR
#undef AT_FORALL_BINARY_OPS

} // namespace at
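A small usage sketch: the macro above is what makes `Scalar op Tensor` work. For non-commutative ops the scalar is materialized into a tensor first (the reverse_scalar_body column), while comparisons are simply flipped. Assumes a standard libtorch build.

#include <torch/torch.h>

int main() {
  torch::Tensor t = torch::ones({3});
  torch::Tensor a = t + 1;   // Tensor op Scalar -> t.add(1)
  torch::Tensor b = 2.0 - t; // Scalar op Tensor -> empty_like(t).fill_(2.0).sub_(t)
  torch::Tensor c = 1 <= t;  // Scalar op Tensor -> flipped comparison: t.ge(1)
  return 0;
}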
2,594
46.181818
77
h
null
pytorch-main/aten/src/ATen/TensorSubclassLikeUtils.h
#pragma once

#include <ATen/core/List.h>
#include <ATen/core/Tensor.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/equal.h>
#endif

namespace at {

// Note [Tensor-subclass-like Tensors]
// Tensor-subclass-like is defined as:
// - a Tensor subclass (via __torch_dispatch__ in Python or extending
//   TensorImpl in C++)
// - anything else that shares the same perils as Tensor subclasses.
//   For example, many Tensor subclasses do not have storage and meta Tensors
//   do not have storage either, so meta Tensors belong here.
//
// We should ensure that PyTorch internals supports Tensor-subclass-like
// objects. In particular, Tensor-subclass-like objects struggle with two
// classes of operations that are problematic for Tensor subclasses:
// 1. Because some Tensor subclasses do not have storage, .item() or
//    .data_ptr() calls are not good.
// 2. Certain in-place operations can eliminate the typing of the Tensor
//    subclass. For example:
//    >>> torch.zeros(input.sizes(), grad.options()).diag().copy_(input)
//    If input is a Tensor subclass, then the above ends up either erroring
//    out or returning a regular non-Tensor-subclass Tensor!

constexpr auto kFunctorchWrappedTensors = DispatchKeySet(
    {DispatchKey::FuncTorchGradWrapper,
     DispatchKey::FuncTorchBatched,
     DispatchKey::Functionalize});

constexpr auto kTensorSubclassLike =
    kFunctorchWrappedTensors |
    DispatchKeySet(
        {// WARNING: DO NOT put combined backend component + functionality
         // keys here, you will incorrectly always match on the functionality
         // key no matter the backend component
         DispatchKey::Batched,
         DispatchKey::Sparse,
         DispatchKey::SparseCsrCPU,
         DispatchKey::SparseCsrCUDA,
         DispatchKey::Python}) |
    DispatchKeySet(BackendComponent::MetaBit);

inline bool isTensorSubclassLike(const Tensor& tensor) {
  if (c10::impl::dispatch_mode_enabled())
    return true;
  auto key_set = tensor.unsafeGetTensorImpl()->key_set();
  return !(key_set & kTensorSubclassLike).empty();
}

inline bool areAnyTensorSubclassLike(TensorList tensors) {
  if (c10::impl::dispatch_mode_enabled())
    return true;
  return std::any_of(tensors.begin(), tensors.end(), isTensorSubclassLike);
}

inline bool areAnyOptionalTensorSubclassLike(
    const c10::List<c10::optional<Tensor>>& tensors) {
  if (c10::impl::dispatch_mode_enabled())
    return true;
  return std::any_of(
      tensors.begin(), tensors.end(), [](const optional<Tensor>& opt_tensor) {
        return (
            opt_tensor.has_value() && isTensorSubclassLike(opt_tensor.value()));
      });
}

// Helper function for testing the truthiness of a scalar tensor
// in a Composite Compliant manner.
// NOTE: This function expects a scalar tensor of boolean dtype.
// E.g.:
//   Non-Composite Compliant pattern: (t == 0).all().item<bool>()
//   Composite Compliant pattern:     is_scalar_tensor_true((t == 0).all())
inline bool is_scalar_tensor_true(const Tensor& t) {
  TORCH_INTERNAL_ASSERT(t.dim() == 0)
  TORCH_INTERNAL_ASSERT(t.scalar_type() == kBool)
  return at::equal(t, t.new_ones({}, t.options()));
}

} // namespace at
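A hedged sketch of how a kernel might use these helpers to stay composite compliant: skip raw-storage fast paths when any input is Tensor-subclass-like. `my_op`, `generic_op`, and `fast_path_op` are hypothetical names, not real ATen functions.

#include <ATen/TensorSubclassLikeUtils.h>

// Hypothetical helpers standing in for a real kernel's two code paths.
at::Tensor generic_op(const at::Tensor& a, const at::Tensor& b);
at::Tensor fast_path_op(const at::Tensor& a, const at::Tensor& b);

at::Tensor my_op(const at::Tensor& a, const at::Tensor& b) {
  // Avoid .item() / .data_ptr() shortcuts when any input is subclass-like
  // (functorch wrappers, meta, sparse, Python subclasses, ...).
  if (at::areAnyTensorSubclassLike({a, b})) {
    return generic_op(a, b); // composite-compliant path
  }
  return fast_path_op(a, b); // may touch raw storage
}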
3,252
35.965909
80
h
null
pytorch-main/aten/src/ATen/TensorUtils.h
#pragma once

#include <ATen/DimVector.h>
#include <ATen/EmptyTensor.h>
#include <ATen/Tensor.h>
#include <ATen/TensorGeometry.h>
#include <ATen/Utils.h>

#include <utility>

// These functions are NOT in Utils.h, because this file has a dep on Tensor.h

#define TORCH_CHECK_TENSOR_ALL(cond, ...) \
  TORCH_CHECK((cond)._is_all_true().item<bool>(), __VA_ARGS__);

namespace at {

// The following are utility functions for checking that arguments
// make sense. These are particularly useful for native functions,
// which do NO argument checking by default.

struct TORCH_API TensorArg {
  const Tensor& tensor;
  const char* name;
  int pos; // 1-indexed
  TensorArg(const Tensor& tensor, const char* name, int pos)
      : tensor(tensor), name(name), pos(pos) {}
  // Try to mitigate any possibility of dangling reference to temporaries.
  TensorArg(Tensor&& tensor, const char* name, int pos) = delete;
  const Tensor* operator->() const {
    return &tensor;
  }
  const Tensor& operator*() const {
    return tensor;
  }
};

struct TORCH_API TensorGeometryArg {
  TensorGeometry tensor;
  const char* name;
  int pos; // 1-indexed
  /* implicit */ TensorGeometryArg(TensorArg arg)
      : tensor(TensorGeometry{arg.tensor}), name(arg.name), pos(arg.pos) {}
  TensorGeometryArg(TensorGeometry tensor, const char* name, int pos)
      : tensor(std::move(tensor)), name(name), pos(pos) {}
  const TensorGeometry* operator->() const {
    return &tensor;
  }
  const TensorGeometry& operator*() const {
    return tensor;
  }
};

// A string describing which function did checks on its input
// arguments.
// TODO: Consider generalizing this into a call stack.
using CheckedFrom = const char*;

// The undefined convention: singular operators assume their arguments
// are defined, but functions which take multiple tensors will
// implicitly filter out undefined tensors (to make it easier to perform
// tests which should apply if the tensor is defined, and should not
// otherwise.)
//
// NB: This means that the n-ary operators take lists of TensorArg,
// not TensorGeometryArg, because the Tensor to TensorGeometry
// conversion will blow up if you have undefined tensors.

TORCH_API std::ostream& operator<<(std::ostream& out, TensorGeometryArg t);
TORCH_API void checkDim(
    CheckedFrom c,
    const Tensor& tensor,
    const char* name,
    int pos, // 1-indexed
    int64_t dim);
TORCH_API void checkDim(CheckedFrom c, const TensorGeometryArg& t, int64_t dim);
// NB: this is an inclusive-exclusive range
TORCH_API void checkDimRange(
    CheckedFrom c,
    const TensorGeometryArg& t,
    int64_t dim_start,
    int64_t dim_end);
TORCH_API void checkSameDim(
    CheckedFrom c,
    const TensorGeometryArg& t1,
    const TensorGeometryArg& t2);
TORCH_API void checkContiguous(CheckedFrom c, const TensorGeometryArg& t);
TORCH_API void checkAllContiguous(CheckedFrom c, at::ArrayRef<TensorArg> ts);
TORCH_API void checkSize(
    CheckedFrom c,
    const TensorGeometryArg& t,
    IntArrayRef sizes);
TORCH_API void checkSize_symint(
    CheckedFrom c,
    const TensorGeometryArg& t,
    c10::SymIntArrayRef sizes);
TORCH_API void checkSize(
    CheckedFrom c,
    const TensorGeometryArg& t,
    int64_t dim,
    int64_t size);
TORCH_API void checkSize_symint(
    CheckedFrom c,
    const TensorGeometryArg& t,
    int64_t dim,
    c10::SymInt size);
TORCH_API void checkNumel(
    CheckedFrom c,
    const TensorGeometryArg& t,
    int64_t numel);
TORCH_API void checkSameNumel(
    CheckedFrom c,
    const TensorArg& t1,
    const TensorArg& t2);
TORCH_API void checkAllSameNumel(CheckedFrom c, ArrayRef<TensorArg> tensors);
TORCH_API void checkScalarType(CheckedFrom c, const TensorArg& t, ScalarType s);
TORCH_API void checkScalarTypes(
    CheckedFrom c,
    const TensorArg& t,
    at::ArrayRef<ScalarType> l);
TORCH_API void checkSameGPU(
    CheckedFrom c,
    const TensorArg& t1,
    const TensorArg& t2);
TORCH_API void checkAllSameGPU(CheckedFrom c, ArrayRef<TensorArg> tensors);
TORCH_API void checkSameType(
    CheckedFrom c,
    const TensorArg& t1,
    const TensorArg& t2);
TORCH_API void checkAllSameType(CheckedFrom c, ArrayRef<TensorArg> tensors);
TORCH_API void checkSameSize(
    CheckedFrom c,
    const TensorArg& t1,
    const TensorArg& t2);
TORCH_API void checkAllSameSize(CheckedFrom c, ArrayRef<TensorArg> tensors);
TORCH_API void checkDefined(CheckedFrom c, const TensorArg& t);
TORCH_API void checkAllDefined(CheckedFrom c, at::ArrayRef<TensorArg> t);

// FixMe: does TensorArg slow things down?
TORCH_API void checkBackend(
    CheckedFrom c,
    at::ArrayRef<Tensor> t,
    at::Backend backend);

TORCH_API void checkDeviceType(
    CheckedFrom c,
    at::ArrayRef<Tensor> tensors,
    at::DeviceType device_type);

TORCH_API void checkLayout(CheckedFrom c, const Tensor& t, Layout layout);

TORCH_API void checkLayout(
    CheckedFrom c,
    at::ArrayRef<Tensor> tensors,
    at::Layout layout);

// Methods for getting data_ptr if tensor is defined
TORCH_API void* maybe_data_ptr(const Tensor& tensor);
TORCH_API void* maybe_data_ptr(const TensorArg& tensor);

TORCH_API void check_dim_size(
    const Tensor& tensor,
    int64_t dim,
    int64_t dim_size,
    int64_t size);

namespace detail {
TORCH_API std::vector<int64_t> defaultStrides(IntArrayRef sizes);

TORCH_API c10::optional<std::vector<int64_t>> computeStride(
    IntArrayRef oldshape,
    IntArrayRef oldstride,
    IntArrayRef newshape);

TORCH_API c10::optional<SymDimVector> computeStride(
    c10::SymIntArrayRef oldshape,
    c10::SymIntArrayRef oldstride,
    c10::SymIntArrayRef newshape);

TORCH_API c10::optional<DimVector> computeStride(
    IntArrayRef oldshape,
    IntArrayRef oldstride,
    const DimVector& newshape);

} // namespace detail
} // namespace at
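A hedged sketch of how a native function might use these checkers; the op name "my_conv" and the shape expectations are illustrative, not a real kernel.

#include <ATen/TensorUtils.h>

void my_conv_shape_check(const at::Tensor& input, const at::Tensor& weight) {
  at::CheckedFrom c = "my_conv";
  at::TensorArg input_arg{input, "input", 1};
  at::TensorArg weight_arg{weight, "weight", 2};
  at::checkAllDefined(c, {input_arg, weight_arg});
  at::checkDim(c, input_arg, 4);               // e.g. expect NCHW input
  at::checkSameType(c, input_arg, weight_arg); // same dtype/backend
}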
5,792
29.97861
80
h
null
pytorch-main/aten/src/ATen/ThreadLocalPythonObjects.h
#pragma once

#include <c10/core/SafePyObject.h>
#include <c10/macros/Macros.h>
#include <unordered_map>

namespace at {
namespace impl {

struct TORCH_API ThreadLocalPythonObjects {
  static void set(const std::string& key, std::shared_ptr<SafePyObject> value);
  static const std::shared_ptr<SafePyObject>& get(const std::string& key);
  static bool contains(const std::string& key);

  static const ThreadLocalPythonObjects& get_state();
  static void set_state(ThreadLocalPythonObjects state);

 private:
  std::unordered_map<std::string, std::shared_ptr<c10::SafePyObject>> obj_dict_;
};

} // namespace impl
} // namespace at
632
25.375
80
h
null
pytorch-main/aten/src/ATen/TracerMode.h
#pragma once

#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/macros/Export.h>
#include <c10/macros/Macros.h>

// NOTE [Tracing Mode Switches]
//
// Historically, the tracing function was controlled by two switches:
//
// - `AutoDispatchBelowADInplaceOrView` guard
//
//   The tracing function used to be script-generated inside
//   `VariableType_*.cpp` kernels, sharing the same `Autograd` dispatch key
//   with the autograd function. Therefore, before the tracing function was
//   moved out of VariableType, the `AutoDispatchBelowADInplaceOrView` guard
//   could also disable tracing as a side effect of disabling `Autograd`
//   dispatching.
//
// - `setTracingState()` API in `torch/csrc/jit/frontend/tracer.h`
//
//   It stores tracing data in a `TracingState` object in TLS. If the
//   `TracingState` object in TLS is `null`, then tracing is paused.
//
//   The `TracingState` object is created in `tracer::trace()` - the main
//   entrance of the tracing function. It's temporarily set to `null` inside
//   generated VariableType (now TraceType) to bypass tracing for intermediate
//   ops (ops being called by other ops). After the intermediate op call
//   finishes it's set back to the original `TracingState` object.
//
//   The `TracingState` object in TLS can also be read/written via its Python
//   binding in `python_tracer.cpp`, and the `get/setTracingState()` C++ APIs,
//   which are also exposed as `TORCH_API`.
//
// Two new switches were introduced since the tracing function was moved out
// of VariableType:
//
// - `tracer::impl::set_dispatch_enabled()` API
//
//   Unlike the special `Autograd` dispatch key, which is included in the
//   dispatch key set by default, the `Tracer` dispatch key is off by default.
//   The dispatching switch can be toggled via this new API.
//
// - `tracer::impl::NoTracerDispatchMode` guard
//
//   It's used to cover the old semantics of
//   `AutoDispatchBelowADInplaceOrView` after tracing was moved out of
//   VariableType.
//
// Before the tracing function was moved out of VariableType, tracing was
// enabled when the following conditions were satisfied:
//
// 1) `TracingState` object in TLS != null;
//    - Either inside the execution scope of `tracer::trace()`, or
//    - Eagerly called `setTracingState()` with a non-null object.
// 2) Not inside the `AutoDispatchBelowADInplaceOrView` scope;
//
// After:
//
// 1) `TracingState` object in TLS != null;
// 2) Has called `tracer::impl::set_dispatch_enabled(true)`;
// 3) Not inside the `tracer::impl::NonDispatchGuard` scope;
//
// [TODOs]
//
// - `setTracingState()` v.s. `tracer::impl::set_dispatch_enabled()`
//
//   Currently `set_dispatch_enabled()` is set/unset inside `setTracingState()`
//   to keep the semantics exactly the same as before - it's confusing to keep
//   both switches, though. We should consider simplifying/limiting the exposed
//   `setTracingState()` Python/C++ APIs (and other APIs calling it) so that
//   these two can be unified.
//
// - `AutoDispatchBelowADInplaceOrView` v.s.
//   `tracer::impl::NoTracerDispatchMode`
//
//   We don't need to always set both guards together to keep semantics
//   unchanged. For the following use cases of
//   `AutoDispatchBelowADInplaceOrView` we don't need to set the new tracer
//   guard:
//
//   * Script-generated VariableType kernels. The guard is not necessary as
//     tracing is already disabled explicitly by `setTracingState(null)` in
//     generated TraceType kernels - we could keep it as is or use the new
//     guard instead.
//
//   * Custom ops. Will be handled by the fallback kernel for `Tracer`.
//
//   * Functions that are not likely to be called in a tracing context (no
//     python binding / not an operator), e.g.: all mobile forward() wrappers,
//     test binaries, etc.
//
//   * Where new threads are spawned, e.g.: ATen/native/ConvolutionMM2d.cpp.
//     It's not necessary as tracing is off by default.
//
//   For the rest of the cases we might need to have both:
//
//   * Functions that might be reachable from eager mode python (especially
//     factory methods), e.g.:
//     `internal_new_from_data()` in `torch/csrc/utils/tensor_new.cpp`.
//     Without the new guard it will add `aten::empty` to the traced graph.
//
//   * Some manually maintained functions, e.g.:
//     `torch/csrc/autograd/VariableTypeManual.cpp`.
//     Set the new guard if it's not obvious whether `setTracingState(null)`
//     has been called before it reaches the
//     `AutoDispatchBelowADInplaceOrView` guard.
//
//   We might need to tweak the usage of the new guard to optimize/fix things.
//   It should only affect the correctness of the tracing function, because
//   the guard is essentially a no-op when the master `setTracingState()`
//   switch is off.

namespace at {
// TODO: move this from `at::` to `jit::torch::` after
// `aten/src/ATen/cpp_custom_type_hack.h` is removed.

namespace tracer {
namespace impl {

static inline bool is_dispatch_enabled() {
  return c10::impl::tls_is_dispatch_key_included(at::DispatchKey::Tracer) &&
      !c10::impl::tls_is_dispatch_key_excluded(at::DispatchKey::Tracer);
}

static inline void set_dispatch_enabled(bool enabled) {
  TORCH_INTERNAL_ASSERT(
      !c10::impl::tls_is_dispatch_key_excluded(at::DispatchKey::Tracer),
      "Cannot enable tracing within the scope of NoTracerDispatchMode!");
  c10::impl::tls_set_dispatch_key_included(at::DispatchKey::Tracer, enabled);
}

struct NoTracerDispatchMode {
  c10::impl::ExcludeDispatchKeyGuard guard_{at::DispatchKey::Tracer};
};

} // namespace impl
} // namespace tracer
} // namespace at
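A hedged usage sketch of the guard declared above: scoping out the tracer for a block of intermediate ops, per the semantics described in the NOTE.

#include <ATen/TracerMode.h>

void run_untraced_section() {
  // Excludes DispatchKey::Tracer for the lifetime of the guard, so ops
  // dispatched in this scope skip the Tracer kernels.
  at::tracer::impl::NoTracerDispatchMode no_tracer;
  // ... intermediate ops that should not appear in the traced graph ...
}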
5,572
39.678832
80
h
null
pytorch-main/aten/src/ATen/ThreadLocalState.h
#pragma once

#include <stack>

#include <c10/core/InferenceMode.h>
#include <c10/core/impl/LocalDispatchKeySet.h>
#include <c10/util/Exception.h>
#include <c10/util/ThreadLocalDebugInfo.h>

#include <ATen/FuncTorchTLS.h>
#include <ATen/PythonTorchFunctionTLS.h>
#include <ATen/SavedTensorHooks.h>
#include <ATen/ThreadLocalPythonObjects.h>
#include <ATen/record_function.h>
#include <c10/core/impl/PythonDispatcherTLS.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>

namespace at {

// Thread local state contains values that are preserved across
// thread boundaries (e.g. at::launch/JIT fork, autograd).
// Note at::parallel_for doesn't preserve TLS across thread boundaries.
class TORCH_API ThreadLocalState {
 public:
  // Saves the thread local variables' values and
  // returns them as a ThreadLocalState
  ThreadLocalState();

  // set_grad_mode - force the value of the grad mode TLS in
  // the current state object. This is used for example in the
  // autograd engine.
  void set_grad_mode(bool enabled);

  // set_multithreading_enabled - force the value of the
  // multithreading-enabled TLS in the current state object. This is used for
  // example in the autograd engine.
  void set_multithreading_enabled(bool enabled);

  // Sets thread local variables in the current thread,
  // according to the thread boundary specified
  static void setThreadLocalState(const ThreadLocalState& state);

 private:
  c10::impl::LocalDispatchKeySet dispatch_key_;

  // ThreadLocalDebugInfo does not change after being created
  // with DebugInfoGuard
  std::shared_ptr<c10::ThreadLocalDebugInfo> debug_info_;

  // RecordFunction TLS
  RecordFunctionTLS rf_tls_;

  // TLS for out-of-tree functorch
  // See NOTE [functorch TLS in pytorch/pytorch] for why this needs to be a
  // pointer (spoiler alert: it's due to the indirection)
  // This needs to be a shared_ptr instead of a unique_ptr because
  // ThreadLocalState is copy-able and does indeed get copied. Maybe we can
  // consider adding an explicit copy constructor for ThreadLocalState in the
  // future but I didn't want to add one just for this.
  std::shared_ptr<const functorch::FuncTorchTLSBase> functorch_tls_;

  // TLS for AutogradModes
  AutogradState autograd_tls_;

  // TLS for enable_torch_dispatch_mode
  c10::impl::TorchDispatchModeTLS torch_dispatch_mode_state_;

  // TLS for enable_python_dispatcher
  c10::impl::PyInterpreter* python_dispatcher_state_;

  // TLS for __torch_function__ (mode and disable_torch_function)
  at::impl::PythonTorchFunctionTLS python_torch_function_state_;

  // TLS for saved tensors default hooks
  at::impl::SavedTensorDefaultHooksTLS saved_tensors_default_hooks_state_;

  bool functionalization_reapply_views_state_;

  // TLS for arbitrary python objects that are registered via hooks
  at::impl::ThreadLocalPythonObjects saved_objects_;

  friend class ThreadLocalStateGuard;
};

// Guard to set and reset the thread local state
class TORCH_API ThreadLocalStateGuard {
 public:
  explicit ThreadLocalStateGuard(const ThreadLocalState& state)
      : prev_state_(ThreadLocalState()) {
    // set the given state across the thread boundary
    ThreadLocalState::setThreadLocalState(state);
  }

  ~ThreadLocalStateGuard() {
    // restore previously set variables
    ThreadLocalState::setThreadLocalState(prev_state_);
  }

 private:
  const ThreadLocalState prev_state_;
};

template <typename T>
auto wrapPropagateTLSState(T callback) {
  return [tls_state = ThreadLocalState(),
          callback = std::move(callback)](auto&&... args) {
    ThreadLocalStateGuard g(tls_state);
    // Propagate value returned by callback().
    return callback(std::forward<decltype(args)>(args)...);
  };
}

} // namespace at
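A hedged usage sketch for wrapPropagateTLSState: capture the caller's TLS (grad mode, dispatch keys, torch_function/torch_dispatch modes, ...) so the callback observes it when run on another thread. std::thread is used here only to keep the example self-contained.

#include <ATen/ThreadLocalState.h>
#include <thread>

void run_with_callers_tls() {
  auto task = at::wrapPropagateTLSState([] {
    // This body runs under the TLS snapshot taken at wrap time.
  });
  std::thread t(task);
  t.join();
}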
3,753
31.643478
77
h
null
pytorch-main/aten/src/ATen/TypeDefault.h
#pragma once

#include <ATen/Dimname.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/QScheme.h>
#include <c10/core/Scalar.h>
#include <c10/core/TensorOptions.h>
#include <c10/macros/Export.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/intrusive_ptr.h>

namespace c10 {
struct Storage;
}

namespace at {

class Tensor;
using TensorList = ArrayRef<Tensor>;

class Context;
struct Generator;

struct Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function
// API; we'll remove it once we actually expose the Quantizer class to the
// frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;

} // namespace at
666
20.516129
76
h
null
pytorch-main/aten/src/ATen/Utils.h
#pragma once

#include <ATen/EmptyTensor.h>
#include <ATen/Formatting.h>
#include <ATen/core/ATenGeneral.h>
#include <ATen/core/Generator.h>
#include <c10/core/ScalarType.h>
#include <c10/core/StorageImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/accumulate.h>
#include <c10/util/irange.h>

#include <algorithm>
#include <memory>
#include <numeric>
#include <sstream>
#include <typeinfo>

#define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&) = delete;         \
  void operator=(const TypeName&) = delete

namespace at {

TORCH_API int _crash_if_asan(int);

// Converts a TensorList (i.e. ArrayRef<Tensor>) to a vector of TensorImpl*.
// NB: This is ONLY used by legacy TH bindings, and ONLY used by cat.
// Once cat is ported entirely to ATen this can be deleted!
static inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
    ArrayRef<Tensor> tensors,
    const char* name,
    int pos,
    c10::DeviceType device_type,
    ScalarType scalar_type) {
  std::vector<TensorImpl*> unwrapped;
  unwrapped.reserve(tensors.size());
  for (const auto i : c10::irange(tensors.size())) {
    const auto& expr = tensors[i];
    if (expr.layout() != Layout::Strided) {
      AT_ERROR(
          "Expected dense tensor but got ",
          expr.layout(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    if (expr.device().type() != device_type) {
      AT_ERROR(
          "Expected object of device type ",
          device_type,
          " but got device type ",
          expr.device().type(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    if (expr.scalar_type() != scalar_type) {
      AT_ERROR(
          "Expected object of scalar type ",
          scalar_type,
          " but got scalar type ",
          expr.scalar_type(),
          " for sequence element ",
          i,
          " in sequence argument at position #",
          pos,
          " '",
          name,
          "'");
    }
    unwrapped.emplace_back(expr.unsafeGetTensorImpl());
  }
  return unwrapped;
}

template <size_t N>
std::array<int64_t, N> check_intlist(
    ArrayRef<int64_t> list,
    const char* name,
    int pos) {
  if (list.empty()) {
    // TODO: is this necessary? We used to treat nullptr-vs-not in IntList
    // differently with strides as a way of faking optional.
    list = {};
  }
  auto res = std::array<int64_t, N>();
  if (list.size() == 1 && N > 1) {
    res.fill(list[0]);
    return res;
  }
  if (list.size() != N) {
    AT_ERROR(
        "Expected a list of ",
        N,
        " ints but got ",
        list.size(),
        " for argument #",
        pos,
        " '",
        name,
        "'");
  }
  std::copy_n(list.begin(), N, res.begin());
  return res;
}

using at::detail::check_size_nonnegative;

namespace detail {

template <typename T>
TORCH_API Tensor tensor_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_backend(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_complex_cpu(ArrayRef<T> values, const TensorOptions& options);

template <typename T>
TORCH_API Tensor
tensor_complex_backend(ArrayRef<T> values, const TensorOptions& options);

} // namespace detail

} // namespace at
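A hedged usage sketch for check_intlist: a single value broadcasts to all N slots (matching, e.g., kernel_size=3 meaning {3, 3}), and a wrong-length list raises an error naming the argument. The argument name here is illustrative.

#include <ATen/Utils.h>

void example() {
  int64_t one[] = {3};
  // Broadcasts {3} to {3, 3}; check_intlist<2>({1, 2, 3}, ...) would error.
  auto ks = at::check_intlist<2>(one, "kernel_size", 1);
  (void)ks;
}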
3,569
24.683453
78
h
null
pytorch-main/aten/src/ATen/WrapDimUtils.h
#pragma once

#include <ATen/core/IListRef.h>
#include <ATen/core/Tensor.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/WrapDimMinimal.h>
#include <c10/util/irange.h>

namespace at {

// if dim_post_expr is 0 and wrap_scalar is true, then dim must be in the
// range [-1, 0]. This is a special case for scalar tensors and manifests in
// e.g. torch.sum(scalar_tensor, 0). Otherwise, dim should be in the range
// [-dim_post_expr, dim_post_expr-1].
using c10::maybe_wrap_dim;

inline int64_t maybe_wrap_dim(int64_t dim, TensorImpl* tensor) {
  return maybe_wrap_dim(dim, tensor->dim());
}

inline int64_t maybe_wrap_dim(int64_t dim, TensorList tensors) {
  if (tensors.empty()) {
    // can't wrap empty TensorList; rely on underlying implementation to throw
    // error if necessary.
    return dim;
  }
  return maybe_wrap_dim(dim, tensors[0].dim());
}

inline int64_t maybe_wrap_dim(
    int64_t dim,
    const std::vector<std::vector<int64_t>>& tensor_sizes) {
  if (tensor_sizes.empty()) {
    // can't wrap empty list; rely on underlying implementation to throw error
    // if necessary
    return dim;
  }
  return maybe_wrap_dim(dim, tensor_sizes[0].size());
}

// Given an array of dimensions `dims` of length `ndims`, this function
// "wraps" each dim in-place for a tensor of rank `dim_post_expr`, allowing
// dims to be specified using negative indices.
//
// Additionally, if `wrap_scalars` is true, then scalar tensors (rank 0) allow
// dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for
// dimensions not in the range [-dim_post_expr, dim_post_expr).
inline void maybe_wrap_dims_n(
    int64_t* dims,
    int64_t ndims,
    int64_t dim_post_expr,
    bool wrap_scalars = true) {
  if (dim_post_expr <= 0) {
    if (wrap_scalars) {
      dim_post_expr = 1; // this will make range [-1, 0]
    } else {
      TORCH_CHECK_INDEX(
          ndims == 0,
          "Dimension specified as ",
          dims[0],
          " but tensor has no dimensions");
      return;
    }
  }
  int64_t min = -dim_post_expr;
  int64_t max = dim_post_expr - 1;
  for (const auto i : c10::irange(ndims)) {
    auto& dim = dims[i];
    if (dim < min || dim > max) {
      TORCH_CHECK_INDEX(
          false,
          "Dimension out of range (expected to be in range of [",
          min,
          ", ",
          max,
          "], but got ",
          dim,
          ")");
    }
    if (dim < 0)
      dim += dim_post_expr;
  }
}

// Given a contiguous container of dimensions `dims`, this function "wraps"
// each dim in-place for a tensor of rank `dim_post_expr`, allowing dims to be
// specified using negative indices.
//
// Additionally, if `wrap_scalars` is true, then scalar tensors (rank 0) allow
// dimensions in the range [-1, 0]. Otherwise, an IndexError is raised for
// dimensions not in the range [-dim_post_expr, dim_post_expr).
template <typename Container>
inline void maybe_wrap_dims(
    Container& dims,
    int64_t dim_post_expr,
    bool wrap_scalars = true) {
  return maybe_wrap_dims_n(
      dims.data(), dims.size(), dim_post_expr, wrap_scalars);
}

// previously, size [0] tensors were the only possible empty tensors; thus, it
// wasn't possible to cat empty tensors unless all the other tensors were
// 1-dimensional, so we allowed these tensors to be "skipped" (both for wrap
// dimension behavior and dimension size checking). We maintain this behavior
// for backwards compatibility, but only for this specific size (i.e. other
// empty sizes are not skipped).
template <typename T>
inline int64_t _legacy_cat_wrap_dim(
    int64_t dim,
    const std::vector<std::vector<T>>& tensor_sizes) {
  for (auto& sizes : tensor_sizes) {
    if (sizes.size() == 1 && sizes[0] == 0) {
      continue;
    }
    return maybe_wrap_dim(dim, sizes.size());
  }
  return dim;
}

inline int64_t legacy_cat_wrap_dim(
    int64_t dim,
    const std::vector<std::vector<int64_t>>& tensor_sizes) {
  return _legacy_cat_wrap_dim<int64_t>(dim, tensor_sizes);
}

inline int64_t legacy_cat_wrap_dim_symint(
    int64_t dim,
    const std::vector<std::vector<c10::SymInt>>& tensor_sizes) {
  return _legacy_cat_wrap_dim<c10::SymInt>(dim, tensor_sizes);
}

inline int64_t legacy_cat_wrap_dim(
    int64_t dim,
    const MaterializedITensorListRef& tensors) {
  for (const Tensor& tensor : tensors) {
    if (tensor.dim() == 1 && tensor.sizes()[0] == 0) {
      continue;
    }
    return maybe_wrap_dim(dim, tensor.dim());
  }
  return dim;
}

// wrap negative dims in a vector
inline void wrap_all_dims(
    std::vector<int64_t>& dims_to_wrap,
    int64_t tensor_total_dims) {
  for (const auto i : c10::irange(dims_to_wrap.size())) {
    dims_to_wrap[i] = maybe_wrap_dim(dims_to_wrap[i], tensor_total_dims);
  }
}

} // namespace at
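A hedged worked example of the wrapping rule: for a rank-4 tensor, dim = -1 wraps to 3 and dim = -4 wraps to 0, while out-of-range dims raise IndexError.

#include <ATen/WrapDimUtils.h>
#include <cassert>

void example() {
  assert(at::maybe_wrap_dim(-1, /*dim_post_expr=*/4) == 3);
  assert(at::maybe_wrap_dim(2, /*dim_post_expr=*/4) == 2);
  // at::maybe_wrap_dim(4, 4) would throw: the valid range is [-4, 3].
}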
4,778
30.032468
80
h
null
pytorch-main/aten/src/ATen/WrapDimUtilsMulti.h
#pragma once

#include <ATen/WrapDimUtils.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/irange.h>
#include <bitset>
#include <sstream>

namespace at {

// This is in an extra file to work around strange interaction of
// bitset on Windows with operator overloading

constexpr size_t dim_bitset_size = 64;

static inline std::bitset<dim_bitset_size> dim_list_to_bitset(
    OptionalIntArrayRef opt_dims,
    int64_t ndims) {
  TORCH_CHECK(
      ndims <= (int64_t)dim_bitset_size,
      "only tensors with up to ",
      dim_bitset_size,
      " dims are supported");
  std::bitset<dim_bitset_size> seen;
  if (opt_dims.has_value()) {
    auto dims = opt_dims.value();
    for (const auto i : c10::irange(dims.size())) {
      size_t dim = maybe_wrap_dim(dims[i], ndims);
      TORCH_CHECK(
          !seen[dim],
          "dim ",
          dim,
          " appears multiple times in the list of dims");
      seen[dim] = true;
    }
  } else {
    for (int64_t dim = 0; dim < ndims; dim++) {
      seen[dim] = true;
    }
  }
  return seen;
}

} // namespace at
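A hedged usage sketch for dim_list_to_bitset: {0, -1} on a rank-4 tensor marks bits 0 and 3, and passing nullopt marks every dim (the usual "reduce over all dims" convention). The implicit conversions to OptionalIntArrayRef are assumed here.

#include <ATen/WrapDimUtilsMulti.h>

void example() {
  int64_t dims[] = {0, -1};
  auto mask = at::dim_list_to_bitset(at::IntArrayRef(dims), /*ndims=*/4);
  // mask[0] == true, mask[3] == true, mask[1] == mask[2] == false
  auto all = at::dim_list_to_bitset(c10::nullopt, /*ndims=*/4);
  (void)mask;
  (void)all;
}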
1,071
22.822222
65
h