source
stringlengths
3
92
c
stringlengths
26
2.25M
flip_compute.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <stdint.h> #include <bitset> #include <vector> #include "lite/core/kernel.h" namespace paddle { namespace lite { namespace kernels { namespace host { template <typename T> class FlipCompute : public KernelLite<TARGET(kHost), PRECISION(kAny)> { public: using param_t = operators::FcParam; void Run() { constexpr size_t dim_bitset_size = 64; auto& param = this->Param<operators::FlipParam>(); auto x = param.X; auto out = param.Out; auto flip_dims = param.axis; auto x_dims = x->dims(); const int total_dims = x_dims.size(); std::bitset<dim_bitset_size> dim_bitset; for (size_t i = 0; i < flip_dims.size(); ++i) { int dim = flip_dims[i]; if (flip_dims[i] < 0) { dim += total_dims; } dim_bitset[dim] = true; } auto x_strides = x_dims.Vectorize(); auto numel = x->numel(); const T* x_data = x->template data<T>(); T* out_data = out->template mutable_data<T>(); #pragma omp parallel for for (int64_t i = 0; i < numel; ++i) { int64_t cur_indices = i; int64_t rem = 0; int64_t dst_offset = 0; for (int d = 0; d < total_dims; ++d) { int64_t temp = cur_indices; cur_indices = cur_indices / x_strides[d]; rem = temp - cur_indices * x_strides[d]; dst_offset += dim_bitset[d] ? 
(x_dims[d] - 1 - cur_indices) * x_strides[d] : cur_indices * x_strides[d]; cur_indices = rem; } out_data[i] = x_data[dst_offset]; } } ~FlipCompute() = default; }; } // namespace host } // namespace kernels } // namespace lite } // namespace paddle
callback.h
// OMPT test tool support (part 1): string tables for OMPT enum values,
// tool-side pointers to the OMPT inquiry functions (filled in by
// ompt_initialize() via the lookup callback), and helpers that print task
// and frame information for the runtime tests to match against.
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <inttypes.h>
#include <omp.h>
#include <ompt.h>
#include "ompt-signal.h"

// Used to detect architecture
#include "../../src/kmp_platform.h"

// Human-readable names indexed by the corresponding OMPT enum value.
static const char* ompt_thread_type_t_values[] = {
  NULL,
  "ompt_thread_initial",
  "ompt_thread_worker",
  "ompt_thread_other"
};

static const char* ompt_task_status_t_values[] = {
  NULL,
  "ompt_task_complete",
  "ompt_task_yield",
  "ompt_task_cancel",
  "ompt_task_others"
};

static const char* ompt_cancel_flag_t_values[] = {
  "ompt_cancel_parallel",
  "ompt_cancel_sections",
  "ompt_cancel_do",
  "ompt_cancel_taskgroup",
  "ompt_cancel_activated",
  "ompt_cancel_detected",
  "ompt_cancel_discarded_task"
};

// Entry points obtained from the runtime's lookup function; valid only
// after ompt_initialize() has run.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;

// Prints parallel/task ids and the task frame pointers for the task at the
// given ancestry level (0 = current task).
static void print_ids(int level)
{
  ompt_frame_t* frame;
  ompt_data_t* parallel_data;
  ompt_data_t* task_data;
  int exists_task = ompt_get_task_info(level, NULL, &task_data, &frame,
                                       &parallel_data, NULL);
  if (frame)
  {
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p\n",
           ompt_get_thread_data()->value, level,
           exists_task ? parallel_data->value : 0,
           exists_task ? task_data->value : 0,
           frame->exit_frame, frame->enter_frame);
  }
  else
    // frame is NULL here, so "frame=%p" prints (nil)/0x0.
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", frame=%p\n",
           ompt_get_thread_data()->value, level,
           exists_task ? parallel_data->value : 0,
           exists_task ? task_data->value : 0, frame);
}

// Prints the frame address at the given level for comparison with the
// exit/reenter frame pointers reported by the runtime.
#define print_frame(level)\
do {\
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n",\
         ompt_get_thread_data()->value, level,\
         __builtin_frame_address(level));\
} while(0)

// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif

// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:

// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id) // This macro prints the exact address that a previously called runtime function // returns to. #define print_current_address(id) \ define_ompt_label(id) \ print_possible_return_addresses(get_ompt_label_address(id)) #if KMP_ARCH_X86 || KMP_ARCH_X86_64 // On X86 the NOP instruction is 1 byte long. In addition, the comiler inserts // a MOV instruction for non-void runtime functions which is 3 bytes long. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \ ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4) #elif KMP_ARCH_PPC64 // On Power the NOP instruction is 4 bytes long. In addition, the compiler // inserts an LD instruction which accounts for another 4 bytes. In contrast to // X86 this instruction is always there, even for void runtime functions. #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 8) #elif KMP_ARCH_AARCH64 // On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted // store instruction (another 4 bytes long). #define print_possible_return_addresses(addr) \ printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \ ((char *)addr) - 4, ((char *)addr) - 8) #else #error Unsupported target architecture, cannot determine address offset! #endif // This macro performs a somewhat similar job to print_current_address(), except // that it discards a certain number of nibbles from the address and only prints // the most significant bits / nibbles. This can be used for cases where the // return address can only be approximated. // // To account for overflows (ie the most significant bits / nibbles have just // changed as we are a few bytes above the relevant power of two) the addresses // of the "current" and of the "previous block" are printed. 
// OMPT test tool (part 2): every callback below prints the event together
// with its ids/arguments; the tests check this exact output. Do not alter
// any format string or the order of the printf calls.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))

// If you change this define you need to adapt all capture patterns in the
// tests to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, addr)

// Renders the ompt_task_* bit-mask `type` as text in `buffer`. The first four
// flags (kinds) are written without a separator; the modifier flags that
// follow are "|"-prefixed.
static void format_task_type(int type, char* buffer)
{
  char* progress = buffer;
  if(type & ompt_task_initial)
    progress += sprintf(progress, "ompt_task_initial");
  if(type & ompt_task_implicit)
    progress += sprintf(progress, "ompt_task_implicit");
  if(type & ompt_task_explicit)
    progress += sprintf(progress, "ompt_task_explicit");
  if(type & ompt_task_target)
    progress += sprintf(progress, "ompt_task_target");
  if(type & ompt_task_undeferred)
    progress += sprintf(progress, "|ompt_task_undeferred");
  if(type & ompt_task_untied)
    progress += sprintf(progress, "|ompt_task_untied");
  if(type & ompt_task_final)
    progress += sprintf(progress, "|ompt_task_final");
  if(type & ompt_task_mergeable)
    progress += sprintf(progress, "|ompt_task_mergeable");
  if(type & ompt_task_merged)
    progress += sprintf(progress, "|ompt_task_merged");
}

// Mutex wait events (lock/nest-lock/critical/atomic/ordered).
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_wait_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_wait_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_wait_critical: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_wait_atomic: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_wait_ordered: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}

// Mutex acquired events; nest-lock maps to "acquired_nest_lock_first".
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_acquired_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_acquired_nest_lock_first: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_acquired_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_acquired_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_acquired_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Mutex released events; nest-lock maps to "release_nest_lock_last".
static void
on_ompt_callback_mutex_released(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_release_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_last: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_critical:
      printf("%" PRIu64 ": ompt_event_release_critical: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_atomic:
      printf("%" PRIu64 ": ompt_event_release_atomic: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_ordered:
      printf("%" PRIu64 ": ompt_event_release_ordered: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Inner acquire/release of an already-owned nest lock (scope begin/end).
static void
on_ompt_callback_nest_lock(
    ompt_scope_endpoint_t endpoint,
    ompt_wait_id_t wait_id,
    const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_acquired_nest_lock_next: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_release_nest_lock_prev: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
  }
}

// barrier/taskwait/taskgroup region begin/end. At scope_end parallel_data may
// be NULL, hence the (parallel_data)?...:0 guard there.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          // Also dump the current task's frame info at barrier entry.
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
      }
      break;
  }
}

// Same as above but for the wait portion of the sync region.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_kind_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
          printf("%" PRIu64 ": ompt_event_wait_barrier_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ": ompt_event_wait_taskwait_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ": ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, codeptr_ra);
          break;
      }
      break;
  }
}

static void
on_ompt_callback_flush(
    ompt_data_t *thread_data,
    const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_flush: codeptr_ra=%p\n", thread_data->value, codeptr_ra);
}

// NOTE(review): if `flags` contains no kind bit (or no status bit),
// first_flag_value / second_flag_value stay uninitialized when printed —
// the tests only exercise flag combinations where both are set.
static void
on_ompt_callback_cancel(
    ompt_data_t *task_data,
    int flags,
    const void *codeptr_ra)
{
  const char* first_flag_value;
  const char* second_flag_value;
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_do)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];

  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];

  printf("%" PRIu64 ": ompt_event_cancel: task_data=%" PRIu64 ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, task_data->value, first_flag_value, second_flag_value, flags, codeptr_ra);
}

static void
on_ompt_callback_idle(
  ompt_scope_endpoint_t endpoint)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_idle_begin:\n", ompt_get_thread_data()->value);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_idle_end:\n", ompt_get_thread_data()->value);
      break;
  }
}

// Implicit task begin/end; assigns a fresh unique id at scope_begin.
static void
on_ompt_callback_implicit_task(
    ompt_scope_endpoint_t endpoint,
    ompt_data_t *parallel_data,
    ompt_data_t *task_data,
    unsigned int team_size,
    unsigned int thread_num)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      printf("%" PRIu64 ": ompt_event_implicit_task_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, team_size, thread_num);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_implicit_task_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", team_size=%" PRIu32 ", thread_num=%" PRIu32 "\n", ompt_get_thread_data()->value, (parallel_data)?parallel_data->value:0, task_data->value, team_size, thread_num);
      break;
  }
}

static void
on_ompt_callback_lock_init(
  ompt_mutex_kind_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_init_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_init_nest_lock: wait_id=%" PRIu64 ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, hint, impl, codeptr_ra);
      break;
    default:
      break;
  }
}

static void
on_ompt_callback_lock_destroy(
  ompt_mutex_kind_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  switch(kind)
  {
    case ompt_mutex_lock:
      printf("%" PRIu64 ": ompt_event_destroy_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    case ompt_mutex_nest_lock:
      printf("%" PRIu64 ": ompt_event_destroy_nest_lock: wait_id=%" PRIu64 ", codeptr_ra=%p \n", ompt_get_thread_data()->value, wait_id, codeptr_ra);
      break;
    default:
      break;
  }
}

// Worksharing construct begin/end (loop/sections/single/workshare/
// distribute/taskloop); workshare printing is not implemented yet.
static void
on_ompt_callback_work(
  ompt_work_type_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_begin: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ": ompt_event_loop_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ": ompt_event_sections_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ": ompt_event_single_in_block_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ": ompt_event_single_others_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ": ompt_event_distribute_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ": ompt_event_taskloop_end: parallel_id=%" PRIu64 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra, count);
          break;
      }
      break;
  }
}

static void
on_ompt_callback_master(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      printf("%" PRIu64 ": ompt_event_master_begin: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
      break;
    case ompt_scope_end:
      printf("%" PRIu64 ": ompt_event_master_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, task_data->value, codeptr_ra);
      break;
  }
}

// Parallel region begin; assigns a fresh unique id to parallel_data.
static void
on_ompt_callback_parallel_begin(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* parallel_data,
  uint32_t requested_team_size,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  if(parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_parallel_begin: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, parallel_id=%" PRIu64 ", requested_team_size=%" PRIu32 ", codeptr_ra=%p, invoker=%d\n", ompt_get_thread_data()->value, encountering_task_data->value, encountering_task_frame->exit_frame, encountering_task_frame->enter_frame, parallel_data->value, requested_team_size, codeptr_ra, invoker);
}

static void
on_ompt_callback_parallel_end(
  ompt_data_t *parallel_data,
  ompt_data_t *encountering_task_data,
  ompt_invoker_t invoker,
  const void *codeptr_ra)
{
  printf("%" PRIu64 ": ompt_event_parallel_end: parallel_id=%" PRIu64 ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n", ompt_get_thread_data()->value, parallel_data->value, encountering_task_data->value, invoker, codeptr_ra);
}

// Task creation; for the initial task it also initializes parallel_data,
// because no parallel_begin callback fires for the implicit parallel region.
static void
on_ompt_callback_task_create(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame,
    ompt_data_t* new_task_data,
    int type,
    int has_dependences,
    const void *codeptr_ra)
{
  if(new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  char buffer[2048];

  format_task_type(type, buffer);

  //there is no parallel_begin callback for implicit parallel region
  //thus it is initialized in initial task
  if(type & ompt_task_initial)
  {
    ompt_data_t *parallel_data;
    ompt_get_parallel_info(0, &parallel_data, NULL);
    if(parallel_data->ptr)
      printf("%s\n", "0: parallel_data initially not null");
    parallel_data->value = ompt_get_unique_id();
  }

  printf("%" PRIu64 ": ompt_event_task_create: parent_task_id=%" PRIu64 ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, new_task_id=%" PRIu64 ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n", ompt_get_thread_data()->value, encountering_task_data ? encountering_task_data->value : 0, encountering_task_frame ? encountering_task_frame->exit_frame : NULL, encountering_task_frame ? encountering_task_frame->enter_frame : NULL, new_task_data->value, codeptr_ra, buffer, type, has_dependences ? "yes" : "no");
}

// Task switch; a prior status of ompt_task_complete is additionally
// reported as ompt_event_task_end.
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_schedule: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value, ompt_task_status_t_values[prior_task_status], prior_task_status);
  if(prior_task_status == ompt_task_complete)
  {
    printf("%" PRIu64 ": ompt_event_task_end: task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}

static void
on_ompt_callback_task_dependences(
  ompt_data_t *task_data,
  const ompt_task_dependence_t *deps,
  int ndeps)
{
  printf("%" PRIu64 ": ompt_event_task_dependences: task_id=%" PRIu64 ", deps=%p, ndeps=%d\n", ompt_get_thread_data()->value, task_data->value, (void *)deps, ndeps);
}

static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  printf("%" PRIu64 ": ompt_event_task_dependence_pair: first_task_id=%" PRIu64 ", second_task_id=%" PRIu64 "\n", ompt_get_thread_data()->value, first_task_data->value, second_task_data->value);
}

// Thread begin; assigns a fresh unique id to thread_data.
static void
on_ompt_callback_thread_begin(
  ompt_thread_type_t thread_type,
  ompt_data_t *thread_data)
{
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ": ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, ompt_thread_type_t_values[thread_type], thread_type, thread_data->value);
}

static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  printf("%" PRIu64 ": ompt_event_thread_end: thread_id=%" PRIu64 "\n", ompt_get_thread_data()->value, thread_data->value);
}

static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ": ompt_event_control_tool: command=%" PRIu64 ", modifier=%" PRIu64 ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, current_task_frame.reenter=%p \n", ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra, omptTaskFrame->exit_frame, omptTaskFrame->enter_frame);
  return 0; //success
}

// Registers on_<name> as the handler for OMPT event <name>, complaining
// when the runtime reports the callback will never be invoked.
#define register_callback_t(name, type)                       \
do{                                                           \
  type f_##name = &on_##name;                                 \
  if (ompt_set_callback(name, (ompt_callback_t)f_##name) ==   \
      ompt_set_never)                                         \
    printf("0: Could not register callback '" #name "'\n");   \
}while(0)

#define register_callback(name) register_callback_t(name, name##_t)

// Tool initializer: resolves the OMPT inquiry functions via `lookup`,
// then registers every callback above. Returns 1 (keep the tool active).
int ompt_initialize(
  ompt_function_lookup_t lookup,
  ompt_data_t *tool_data)
{
  ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
  ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
  ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
  ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
  ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");

  ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
  ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
  ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
  ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
  ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
  ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
  ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
  ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");

  register_callback(ompt_callback_mutex_acquire);
  register_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
  register_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
  register_callback(ompt_callback_nest_lock);
  register_callback(ompt_callback_sync_region);
  register_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
  register_callback(ompt_callback_control_tool);
  register_callback(ompt_callback_flush);
  register_callback(ompt_callback_cancel);
  register_callback(ompt_callback_idle);
  register_callback(ompt_callback_implicit_task);
  register_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
  register_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
  register_callback(ompt_callback_work);
  register_callback(ompt_callback_master);
  register_callback(ompt_callback_parallel_begin);
  register_callback(ompt_callback_parallel_end);
  register_callback(ompt_callback_task_create);
  register_callback(ompt_callback_task_schedule);
  register_callback(ompt_callback_task_dependences);
  register_callback(ompt_callback_task_dependence);
  register_callback(ompt_callback_thread_begin);
  register_callback(ompt_callback_thread_end);
  printf("0: NULL_POINTER=%p\n", (void*)NULL);
  return 1; //success
}

void ompt_finalize(ompt_data_t *tool_data)
{
  printf("0: ompt_event_runtime_shutdown\n");
}

// Entry point the OpenMP runtime calls to activate the tool.
ompt_start_tool_result_t* ompt_start_tool(
  unsigned int omp_version,
  const char *runtime_version)
{
  static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
  return &ompt_start_tool_result;
}
move.h
#pragma once

#include "core.h"
#include "energy.h"
#include "average.h"
//#include "analysis.h"
#include "potentials.h"
#include "mpi.h"

namespace Faunus {
    namespace Move {

        /**
         * @brief Abstract base class for all Monte Carlo moves
         *
         * Derived classes implement `_move()` (mandatory), `_to_json()` /
         * `_from_json()` for reporting and configuration, and optionally
         * `_accept()` / `_reject()`, called after the MC acceptance test.
         */
        class Movebase {
            private:
                virtual void _move(Change&)=0; //!< Perform move and modify change object
                virtual void _accept(Change&) {}; //!< Call after move is accepted
                virtual void _reject(Change&) {}; //!< Call after move is rejected
                virtual void _to_json(json &j) const=0; //!< Extra info for report if needed
                virtual void _from_json(const json &j)=0; //!< Extra info for report if needed
                TimeRelativeOfTotal<std::chrono::microseconds> timer; // time spent in this move relative to total runtime
            protected:
                unsigned long cnt=0;      // number of attempted moves
                unsigned long accepted=0; // number of accepted moves
                unsigned long rejected=0; // number of rejected moves
            public:
                static Random slump; //!< Shared for all moves
                std::string name;    //!< Name of move
                std::string cite;    //!< Reference
                int repeat=1;        //!< How many times the move should be repeated per sweep

                // Configure generic keys. "repeat" may be a number, or the string
                // "N" (stored as -1; resolved to a system-size dependent value by
                // the derived class' _from_json, else clamped to 0 below).
                inline void from_json(const json &j) {
                    auto it = j.find("repeat");
                    if (it!=j.end()) {
                        if (it->is_number())
                            repeat = it->get<double>();
                        else if (it->is_string())
                            if (it->get<std::string>()=="N")
                                repeat = -1;
                    }
                    _from_json(j);
                    if (repeat<0)
                        repeat=0;
                }

                // NOTE(review): "acceptance" divides by `cnt`; a move that was
                // never attempted (cnt==0) reports NaN/inf here.
                inline void to_json(json &j) const {
                    _to_json(j);
                    j["relative time"] = timer.result();
                    j["acceptance"] = double(accepted)/cnt;
                    j["repeat"] = repeat;
                    j["moves"] = cnt;
                    if (!cite.empty())
                        j["cite"] = cite;
                    _roundjson(j, 3);
                } //!< JSON report w. statistics, output etc.

                inline void move(Change &change) {
                    timer.start();
                    cnt++;
                    change.clear();
                    _move(change);
                    if (change.empty())
                        timer.stop(); // nothing changed -> no energy evaluation follows
                } //!< Perform move and modify given change object

                inline void accept(Change &c) {
                    accepted++;
                    _accept(c);
                    timer.stop();
                }

                inline void reject(Change &c) {
                    rejected++;
                    _reject(c);
                    timer.stop();
                }

                inline virtual double bias(Change &c, double uold, double unew) {
                    return 0; // du
                } //!< adds extra energy change not captured by the Hamiltonian
        };

        Random Movebase::slump; // static instance of Random (shared for all moves)

        inline void from_json(const json &j, Movebase &m) {
            m.from_json( j );
        } //!< Configure any move via json

        inline void to_json(json &j, const Movebase &m) {
            assert( !m.name.empty() );
            m.to_json(j[m.name]);
        }

        /**
         * @brief Swap the charge of a single atom
         */
        template<typename Tspace>
            class AtomicSwapCharge : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    typedef typename Tspace::Tparticle Tparticle;
                    Tspace& spc; // Space to operate on
                    int molid=-1;
                    double ln10 = log(10);
                    double pKa, pH;
                    Average<double> msqd; // mean squared displacement
                    double _sqd, _bias;   // squared displacement; extra bias energy
                    std::string molname;  // name of molecule to operate on
                    Change::data cdata;

                    // NOTE(review): the output key is "pka" while the input key is
                    // "pKa" (see _from_json) -- confirm the case mismatch is intended.
                    void _to_json(json &j) const override {
                        j = {
                            {"pH", pH},
                            {"pka", pKa},
                            {"molid", molid},
                            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
                            {"molecule", molname}
                        };
                        _roundjson(j,3);
                    }

                    void _from_json(const json &j) override {
                        assert(!molecules<Tpvec>.empty());
                        try {
                            molname = j.at("molecule");
                            auto it = findName(molecules<Tpvec>, molname);
                            if (it == molecules<Tpvec>.end())
                                throw std::runtime_error("unknown molecule '" + molname + "'");
                            molid = it->id();
                            pH = j.at("pH").get<double>();
                            pKa = j.at("pKa").get<double>();
                            if (repeat<0) {
                                auto v = spc.findMolecules(molid);
                                repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
// --- AtomicSwapCharge (continued): tail of _from_json() ---------------------
                                if (repeat>0)
                                    repeat = repeat * v.front().size(); // ...and for each atom
                            }
                        } catch (std::exception &e) {
                            std::cerr << name << ": " << e.what();
                            throw;
                        }
                    } //!< Configure via json object

                    // Pick a random atom in a random `molid` group and record its
                    // group/atom index in `cdata`; returns spc.p.end() if none found.
                    typename Tpvec::iterator randomAtom() {
                        assert(molid>=0);
                        auto mollist = spc.findMolecules( molid ); // all `molid` groups
                        if (size(mollist)>0) {
                            auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
                            if (!git->empty()) {
                                auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
                                cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                                cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
                                return p;
                            }
                        }
                        return spc.p.end();
                    }

                    // Toggle the charge of a random atom; the bias follows from
                    // the pH/pKa difference (ideal titration).
                    void _move(Change &change) override {
                        auto p = randomAtom();
                        if (p!=spc.p.end()) {
                            auto& g = spc.groups[cdata.index]; // NOTE(review): unused here -- confirm
                            double oldcharge = p->charge;
                            p->charge = fabs(oldcharge - 1);
                            _sqd = fabs(oldcharge - 1) - oldcharge;
                            change.groups.push_back( cdata ); // add to list of moved groups
                            _bias = _sqd*(pH-pKa)*ln10; // one may add bias here...
                        }
                        else
                            std::cerr << name << ": no atoms found" << std::endl;
                    }

                    double bias(Change &change, double uold, double unew) override {
                        return _bias;
                    } //!< adds extra energy change not captured by the Hamiltonian

                    void _accept(Change &change) override { msqd += _sqd; }
                    void _reject(Change &change) override { msqd += 0; }

                public:
                    AtomicSwapCharge(Tspace &spc) : spc(spc) {
                        name = "swapcharge";
                        repeat = -1; // meaning repeat N times
                        cdata.atoms.resize(1);
                        cdata.internal=true;
                    }
            };

        /**
         * @brief Translate and rotate a molecular group
         */
        // NOTE(review): despite the doxygen text, this move displaces/rotates a
        // single *atom* inside a group of type `molid` -- confirm the wording.
        template<typename Tspace>
            class AtomicTranslateRotate : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    typedef typename Tspace::Tparticle Tparticle;
                    Tspace& spc; // Space to operate on
                    int molid=-1;
                    Point dir={1,1,1};
                    Average<double> msqd; // mean squared displacement
                    double _sqd;          // squared displacement
                    std::string molname;  // name of molecule to operate on
                    Change::data cdata;

                    void _to_json(json &j) const override {
                        j = {
                            {"dir", dir},
                            {"molid", molid},
                            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
                            {"molecule", molname}
                        };
                        _roundjson(j,3);
                    }

                    void _from_json(const json &j) override {
                        assert(!molecules<Tpvec>.empty());
                        try {
                            molname = j.at("molecule");
                            auto it = findName(molecules<Tpvec>, molname);
                            if (it == molecules<Tpvec>.end())
                                throw std::runtime_error("unknown molecule '" + molname + "'");
                            molid = it->id();
                            dir = j.value("dir", Point(1,1,1));
                            if (repeat<0) {
                                auto v = spc.findMolecules(molid, Tspace::ALL );
                                repeat = std::distance(v.begin(), v.end()); // repeat for each molecule...
                                if (repeat>0)
                                    repeat = repeat * v.front().size(); // ...and for each atom
                            }
                        } catch (std::exception &e) {
                            std::cerr << name << ": " << e.what();
                            throw;
                        }
                    } //!< Configure via json object

                    typename Tpvec::iterator randomAtom() {
                        assert(molid>=0);
                        //std::cout<<"molid "<<molid<<std::endl;
                        auto mollist = spc.findMolecules( molid, Tspace::ALL ); // all `molid` groups
                        if (size(mollist)>0) {
                            //std::cout<<"looking for atoms"<<std::endl;
                            auto git = slump.sample( mollist.begin(), mollist.end() ); // random molecule iterator
                            if (!git->empty()) {
                                //std::cout<<"found molecule"<<std::endl;
                                auto p = slump.sample( git->begin(), git->end() ); // random particle iterator
                                cdata.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                                cdata.atoms[0] = std::distance(git->begin(), p); // index of particle rel. to group
                                return p;
                            }
                        }
                        return spc.p.end();
                    }

                    // Displacement parameters (dp, dprot) are per-atom-type properties.
                    void _move(Change &change) override {
                        auto p = randomAtom();
                        if (p!=spc.p.end()) {
                            double dp = atoms<Tparticle>.at(p->id).dp;
                            double dprot = atoms<Tparticle>.at(p->id).dprot;
                            auto& g = spc.groups[cdata.index];
                            if (dp>0) { // translate
                                Point oldpos = p->pos;
                                p->pos += 0.5 * dp * ranunit(slump).cwiseProduct(dir);
                                spc.geo.boundaryFunc(p->pos);
                                _sqd = spc.geo.sqdist(oldpos, p->pos); // squared displacement
                                if (!g.atomic)
                                    g.cm = Geometry::massCenter(g.begin(), g.end(), spc.geo.boundaryFunc, -g.cm);
                            }
                            if (dprot>0) { // rotate
                                Point u = ranunit(slump);
                                double angle = dprot * (slump()-0.5);
                                Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
                                p->rotate(Q, Q.toRotationMatrix());
                            }
                            if (dp>0 || dprot>0)
                                change.groups.push_back( cdata ); // add to list of moved groups
                        }
                        else
                            std::cerr << name << ": no atoms found" << std::endl;
                    }

                    void _accept(Change &change) override { msqd += _sqd; }
                    void _reject(Change &change) override { msqd += 0; }

                public:
                    AtomicTranslateRotate(Tspace &spc) : spc(spc) {
                        name = "transrot";
                        repeat = -1; // meaning repeat N times
                        cdata.atoms.resize(1);
                        cdata.internal=true;
                    }
            };

        /**
         * @brief Translate
* and rotate a molecular group */
        template<typename Tspace>
            class TranslateRotate : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    Tspace& spc; // Space to operate on
                    int molid=-1;
                    double dptrans=0;   // translational displacement parameter
                    double dprot=0;     // rotational displacement parameter
                    Point dir={1,1,1};  // directions along which to translate
                    double _sqd;        // squared displacement
                    Average<double> msqd; // mean squared displacement

                    void _to_json(json &j) const override {
                        j = {
                            {"dir", dir}, {"dp", dptrans}, {"dprot", dprot},
                            {"molid", molid},
                            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
                            {"molecule", molecules<Tpvec>[molid].name}
                        };
                        _roundjson(j,3);
                    }

                    void _from_json(const json &j) override {
                        assert(!molecules<Tpvec>.empty());
                        try {
                            std::string molname = j.at("molecule");
                            auto it = findName(molecules<Tpvec>, molname);
                            if (it == molecules<Tpvec>.end())
                                throw std::runtime_error("unknown molecule '" + molname + "'");
                            molid = it->id();
                            dir = j.value("dir", Point(1,1,1));
                            dprot = j.at("dprot");
                            dptrans = j.at("dp");
                            if (repeat<0) {
                                auto v = spc.findMolecules(molid);
                                repeat = std::distance(v.begin(), v.end());
                            }
                        } catch (std::exception &e) {
                            throw std::runtime_error(name+": " + e.what());
                        }
                    } //!< Configure via json object

                    void _move(Change &change) override {
                        assert(molid>=0);
                        assert(!spc.groups.empty());
                        assert(spc.geo.getVolume()>0);

                        // pick random group from the system matching molecule type
                        // TODO: This can be slow -- implement look-up-table in Space
                        auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w. 'molid'
                        if (size(mollist)>0) {
                            auto it = slump.sample( mollist.begin(), mollist.end() );
                            if (!it->empty()) {
                                assert(it->id==molid);
                                if (dptrans>0) { // translate
                                    Point oldcm = it->cm;
                                    Point dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans;
                                    it->translate( dp, spc.geo.boundaryFunc );
                                    _sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement
                                }
                                if (dprot>0) { // rotate
                                    Point u = ranunit(slump);
                                    double angle = dprot * (slump()-0.5);
                                    Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) );
                                    it->rotate(Q, spc.geo.boundaryFunc);
                                }
                                if (dptrans>0||dprot>0) { // define changes
                                    Change::data d;
                                    d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group
                                    d.all = true; // *all* atoms in group were moved
                                    change.groups.push_back( d ); // add to list of moved groups
                                }
                                assert( spc.geo.sqdist( it->cm,
                                            Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 );
                            }
                        }
                        else
                            std::cerr << name << ": no molecules found" << std::endl;
                    }

                    void _accept(Change &change) override { msqd += _sqd; }
                    void _reject(Change &change) override { msqd += 0; }

                public:
                    TranslateRotate(Tspace &spc) : spc(spc) {
                        name = "moltransrot";
                        repeat = -1; // meaning repeat N times
                    }
            };

#ifdef DOCTEST_LIBRARY_INCLUDED
        TEST_CASE("[Faunus] TranslateRotate")
        {
            typedef Particle<Radius, Charge, Dipole, Cigar> Tparticle;
            typedef Space<Geometry::Cuboid, Tparticle> Tspace;
            typedef typename Tspace::Tpvec Tpvec;

            CHECK( !atoms<Tparticle>.empty() ); // set in a previous test
            CHECK( !molecules<Tpvec>.empty() ); // set in a previous test

            Tspace spc;
            TranslateRotate<Tspace> mv(spc);
            json j = R"( {"molecule":"B", "dp":1.0, "dprot":0.5, "dir":[0,1,0], "repeat":2 })"_json;
            mv.from_json(j);

            j = json(mv).at(mv.name);
            CHECK( j.at("molecule") == "B");
            CHECK( j.at("dir") == Point(0,1,0) );
            CHECK( j.at("dp") == 1.0 );
            CHECK( j.at("repeat") == 2 );
            CHECK( j.at("dprot") == 0.5 );
        }
#endif

        /**
         * @brief QuadrantJump translates a molecule to another quadrant
         * considering as the origin the center of the box or the center
         * of mass of a range of atomic indexes specified by
         * "index": [start:stop].
         */
        template<typename Tspace>
            class QuadrantJump : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    typedef typename Tspace::Tparticle Tparticle;
                    Tspace& spc; // Space to operate on
                    int molid=-1;
                    Point dir={1,1,1};
                    std::vector<size_t> index; // optional [start,stop] atom range defining the origin
                    double _sqd; // squared displacement
                    Average<double> msqd; // mean squared displacement

                    void _to_json(json &j) const override {
                        j = {
                            {"dir", dir}, {"molid", molid},
                            {u8::rootof + u8::bracket("r" + u8::squared), std::sqrt(msqd.avg())},
                            {"molecule", molecules<Tpvec>[molid].name}
                        };
                        _roundjson(j,3);
                    }

                    void _from_json(const json &j) override {
                        assert(!molecules<Tpvec>.empty());
                        try {
                            std::string molname = j.at("molecule");
                            auto it = findName(molecules<Tpvec>, molname);
                            if (it == molecules<Tpvec>.end())
                                throw std::runtime_error("unknown molecule '" + molname + "'");
                            molid = it->id();
                            dir = j.value("dir", Point(1,1,1));
                            index = j.value("index", decltype(index)());
                            if (repeat<0) {
                                auto v = spc.findMolecules(molid);
                                repeat = std::distance(v.begin(), v.end());
                            }
                        } catch (std::exception &e) {
                            throw std::runtime_error(name+": " + e.what());
                        }
                    } //!< Configure via json object

                    void _move(Change &change) override {
                        assert(molid>=0);
                        assert(!spc.groups.empty());
                        assert(spc.geo.getVolume()>0);

                        // pick random group from the system matching molecule type
                        // TODO: This can be slow -- implement look-up-table in Space
                        auto mollist = spc.findMolecules( molid, Tspace::ACTIVE ); // list of molecules w.
'molid' if (size(mollist)>0) { auto it = slump.sample( mollist.begin(), mollist.end() ); if (not it->empty()) { assert(it->id==molid); Point oldcm = it->cm; if (index.size()==2) { Group<Tparticle> g(spc.p.begin(), spc.p.end()); auto cm_O = Geometry::massCenter(g.begin()+index[0], g.begin()+index[1], spc.geo.boundaryFunc ); it->translate( -2*spc.geo.vdist(oldcm, cm_O).cwiseProduct(dir.cast<double>()), spc.geo.boundaryFunc ); } else { it->translate( -2*oldcm.cwiseProduct(dir.cast<double>()), spc.geo.boundaryFunc ); } _sqd = spc.geo.sqdist(oldcm, it->cm); // squared displacement Change::data d; d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups assert( spc.geo.sqdist( it->cm, Geometry::massCenter(it->begin(),it->end(),spc.geo.boundaryFunc,-it->cm) ) < 1e-9 ); } } else std::cerr << name << ": no molecules found" << std::endl; } void _accept(Change &change) override { msqd += _sqd; } void _reject(Change &change) override { msqd += 0; } public: QuadrantJump(Tspace &spc) : spc(spc) { name = "quadrantjump"; repeat = -1; // meaning repeat N times } }; template<typename Tspace> class VolumeMove : public Movebase { private: const std::map<std::string, Geometry::VolumeMethod> methods = { {"xy", Geometry::XY}, {"isotropic", Geometry::ISOTROPIC}, {"isochoric", Geometry::ISOCHORIC} }; typename decltype(methods)::const_iterator method; typedef typename Tspace::Tpvec Tpvec; Tspace& spc; Average<double> msqd; // mean squared displacement double dV=0, deltaV=0, Vnew=0, Vold=0; void _to_json(json &j) const override { using namespace u8; j = { {"dV", dV}, {"method", method->first}, {rootof + bracket(Delta + "V" + squared), std::sqrt(msqd.avg())}, {cuberoot + rootof + bracket(Delta + "V" + squared), std::cbrt(std::sqrt(msqd.avg()))} }; _roundjson(j,3); } void _from_json(const json &j) override { method = methods.find( j.value("method", 
"isotropic") ); if (method==methods.end()) std::runtime_error("unknown volume change method"); dV = j.at("dV"); } void _move(Change &change) override { if (dV>0) { change.dV=true; change.all=true; Vold = spc.geo.getVolume(); if (method->second == Geometry::ISOCHORIC) Vold = std::pow(Vold,1.0/3.0); // volume is constant Vnew = std::exp(std::log(Vold) + (slump()-0.5) * dV); deltaV = Vnew-Vold; spc.scaleVolume(Vnew, method->second); } else deltaV=0; } void _accept(Change &change) override { msqd += deltaV*deltaV; } void _reject(Change &change) override { msqd += 0; } public: VolumeMove(Tspace &spc) : spc(spc) { name = "volume"; repeat = 1; } }; // end of VolumeMove /* * @brief Establishes equilibrium of matter * Establishes equilibrium of matter between all species * * Consider the dissociation process AX=A+X. This class will locate * all species of type AX and A and make a MC swap move between them. * X is implicit, meaning that it enters only with its chemical potential * (activity). The titrating species, their dissociation constants * and the chemical potential of the titrant are read from a * `processes` JSON object. 
* For example, for proton titration of phosphate one would
         * use the following JSON input (pH 7.0):
         *
         * @todo
         * Implement classification of reactions to group weight in
         * mc sweep {reference : prob(reference)}
         *
         */
        template<typename Tspace>
            class SpeciationMove : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    Tspace& spc;      // working space
                    Tspace *otherspc; // companion space, set via setOther(); kept in sync with `spc`
                    ReactionData<Tpvec> *trialprocess; // reaction attempted by the current move
                    std::map<std::string, Average<double>> accmap; // acceptance statistics per reaction

                    double log_k; // log10 equilibrium constant of the attempted reaction
                    bool forward; // direction of the attempted reaction
                    std::vector<int> molDel;  // index of groups to delete
                    std::vector<int> atomDel; // atom index to delete
                    std::map<int, int> molcnt_ins, atomcnt_ins, molcnt_del, atomcnt_del, molcnt, atomcnt; // id's and number of inserted/deleted mols and atoms
                    std::multimap<int, Tpvec> pmap; // coordinates of mols and atoms to be inserted
                    unsigned int Ndeleted, Ninserted; // Number of accepted deletions and insertions

                    void _to_json(json &j) const override {
                        j = {
                            // { "replicas", mpi.nproc() },
                            // { "datasize", pt.getFormat() }
                        };
                        json &_j = j["reactions"];
                        _j = json::object();
                        for (auto &m : accmap)
                            _j[m.first] = {
                                {"attempts", m.second.cnt},
                                {"acceptance", m.second.avg()}
                            };
                    }

                    void _from_json(const json &j) override {
                        //j["speciation"] = "speciation";
                    }

                public:
                    SpeciationMove(Tspace &spc) : spc(spc) {
                        name = "speciation";
                        repeat = 1;
                    }

                    void setOther(Tspace &ospc) {
                        otherspc = &ospc;
                    }

                    double energy(); //!< Returns intrinsic energy of the process

                    // Attempt a randomly chosen reaction in a random direction:
                    // first verify feasibility of both sides, then deactivate the
                    // reactant species and activate the product species.
                    void _move(Change &change) override {
                        if ( reactions<Tpvec>.size()>0 ) {
                            auto rit = slump.sample( reactions<Tpvec>.begin(), reactions<Tpvec>.end() );
                            log_k = rit->log_k;
                            forward = (bool)slump.range(0,1); // random boolean
                            trialprocess = &(*rit);
                            if ( rit->empty(forward) ) // Enforce canonic constraint if invoked
                                return; //Out of material, slip out the back door

                            // Phase 1: can we delete the reactant side?
                            for (auto &m : rit->Molecules2Add( !forward )) { // Delete checks
                                auto mollist = spc.findMolecules( m.first, Tspace::ALL);
                                if ( molecules<Tpvec>[m.first].atomic ) {
                                    if( size(mollist)!=1 ) // There can be only one
                                        throw std::runtime_error("Bad definition: One group per atomic molecule!");
                                    auto git = mollist.begin();
                                    if ( git->size() < m.second ) // assure that there are atoms enough in the group
                                        return;
                                } else {
                                    mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
                                    if ( size(mollist) < m.second )
                                        return; // Not possible to perform change, escape through the back door
                                }
                            }
                            // Phase 2: can we insert the product side?
                            for (auto &m : rit->Molecules2Add( forward )) { // Addition checks
                                auto mollist = spc.findMolecules( m.first, Tspace::ALL);
                                if ( molecules<Tpvec>[m.first].atomic ) {
                                    if( size(mollist)!=1 ) // There can be only one
                                        throw std::runtime_error("Bad definition: One group per atomic molecule!");
                                    auto git = mollist.begin();
                                    if ( (git->size() + m.second) > git->capacity() ) // assure that there are atoms enough in the group
                                        return; // if not slip out the back door
                                } else {
                                    mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
                                    if ( size(mollist) < m.second )
                                        return; // Not possible to perform change, escape through the back door
                                }
                            }

                            //The move is doable, raise flag
                            change.dNpart=true;

                            // Phase 3: deactivate (delete) reactant species
                            for (auto &m : rit->Molecules2Add( !forward )) { // Delete
                                auto mollist = spc.findMolecules( m.first, Tspace::ALL);
                                if ( molecules<Tpvec>[m.first].atomic ) {
                                    if( size(mollist)!=1 ) // There can be only one
                                        throw std::runtime_error("Bad definition: One group per atomic molecule!");
                                    Change::data d;
                                    auto git = mollist.begin();
                                    auto othermollist = otherspc->findMolecules(m.first, Tspace::ALL); // implies that new and old are in sync
                                    auto othergit=othermollist.begin();
                                    d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                                    d.internal = true;
                                    d.dNpart = true;
                                    for ( int N=0; N<m.second; N++ ) { // deactivate m.second m.first atoms
                                        auto ait = slump.sample( git->begin(), git->end()); // iterator to random atom
                                        // Shuffle back to end, both in trial and new
                                        auto nait = git->end()-1; //iterator to last atom
                                        int dist = Faunus::distance( ait, git->end() ); // distance to random atom from end
                                        if ( Faunus::distance( ait, nait) > 1 ) {
                                            // random atom is not already the last one
                                            std::iter_swap(ait, nait);
                                            std::iter_swap(othergit->end()-dist-N, othergit->end() - (1+N) );
                                        }
                                        d.atoms.push_back ( Faunus::distance(git->begin(), nait) );
                                        git->deactivate( nait, git->end());
                                    }
                                    std::sort( d.atoms.begin(), d.atoms.end() );
                                    change.groups.push_back( d ); // add to list of moved groups
                                } else {
                                    mollist = spc.findMolecules( m.first, Tspace::ACTIVE);
                                    for ( int N=0; N <m.second; N++ ) {
                                        Change::data d;
                                        auto git = slump.sample(mollist.begin(), mollist.end());
                                        git->deactivate( git->begin(), git->end());
                                        d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                                        d.all = true; // *all* atoms in group were moved
                                        change.groups.push_back( d ); // add to list of moved groups
                                        mollist = spc.findMolecules( m.first , Tspace::ACTIVE); // Activate/deactivate all? simply move end to front?
                                    }
                                }
                            }
                            // Phase 4: activate (insert) product species
                            for (auto &m : rit->Molecules2Add( forward )) { // Add
                                auto mollist = spc.findMolecules( m.first, Tspace::ALL);
                                if ( molecules<Tpvec>[m.first].atomic ) {
                                    Change::data d;
                                    auto git = mollist.begin();
                                    d.index = Faunus::distance( spc.groups.begin(), git);
                                    d.internal = true;
                                    d.dNpart = true;
                                    for ( int N=0; N<m.second; N++ ) { // activate m.second m.first atoms
                                        git->activate( git->end(), git->end() + 1);
                                        auto ait = git->end()-1;
                                        spc.geo.randompos(ait->pos, slump);
                                        spc.geo.boundaryFunc(ait->pos);
                                        d.atoms.push_back( Faunus::distance(git->begin(), ait) ); // index of particle rel. to group
                                    }
                                    std::sort( d.atoms.begin(), d.atoms.end());
                                    change.groups.push_back( d ); // add to list of moved groups
                                } else {
                                    mollist = spc.findMolecules( m.first, Tspace::INACTIVE);
                                    if ( size(mollist) < m.second ) {
                                        change.dNpart=false;
                                        return; // Not possible to perform change, escape through the back door
                                    }
                                    for ( int N=0; N <m.second; N++ ) {
                                        Change::data d;
                                        auto git = slump.sample(mollist.begin(), mollist.end());
                                        git->activate( git->inactive().begin(), git->inactive().end());
                                        // place at random position and orientation
                                        Point oldcm = git->cm;
                                        spc.geo.randompos(oldcm, random);
                                        git->translate( oldcm, spc.geo.boundaryFunc );
                                        oldcm = ranunit(slump);
                                        Eigen::Quaterniond Q( Eigen::AngleAxisd(2*pc::pi*random(), oldcm) );
                                        git->rotate(Q, spc.geo.boundaryFunc);
                                        d.index = Faunus::distance( spc.groups.begin(), git ); // integer *index* of moved group
                                        d.all = true; // *all* atoms in group were moved
                                        change.groups.push_back( d ); // add to list of moved groups
                                        mollist = spc.findMolecules( m.first , Tspace::INACTIVE);
                                    }
                                }
                            }
                            std::sort(change.groups.begin(), change.groups.end() );
                        } else
                            throw std::runtime_error("No reactions in list, disable speciation or add reactions");
                    }

                    double bias(Change &change, double uold, double unew) override {
                        if (forward)
                            return -log_k*std::log(10);
                        return log_k*std::log(10);
                    } //!< adds extra energy change not captured by the Hamiltonian

                    void _accept(Change &change) override {
                        accmap[ trialprocess->name ] += 1;
                        trialprocess->N_reservoir += (forward == true) ?
-1 : 1; if( trialprocess->N_reservoir < 0 && trialprocess->canonic == true ) throw std::runtime_error("There are no negative number of molecules"); } void _reject(Change &change) override { accmap[ trialprocess->name ] += 0; } }; // End of class SpeciationMove template<typename Tspace> class Cluster : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; Tspace& spc; Average<double> msqd, msqd_angle, N; double thresholdsq=0, dptrans=0, dprot=0, angle=0, _bias=0; Point dir={1,1,1}, dp; std::vector<std::string> names; std::vector<int> ids; std::vector<size_t> index; // all possible molecules to move void _to_json(json &j) const override { using namespace u8; j = { {"threshold", std::sqrt(thresholdsq)}, {"dir", dir}, {"dp", dptrans}, {"dprot", dprot}, {rootof + bracket("r" + squared), std::sqrt(msqd.avg())}, {rootof + bracket(theta + squared) + "/" + degrees, std::sqrt(msqd_angle.avg()) / 1.0_deg}, {bracket("N"), N.avg()} }; _roundjson(j,3); } void _from_json(const json &j) override { dptrans = j.at("dp"); dir = j.value("dir", Point(1,1,1)); dprot = j.at("dprot"); thresholdsq = std::pow(j.at("threshold").get<double>(), 2); names = j.at("molecules").get<decltype(names)>(); // molecule names ids = names2ids(molecules<Tpvec>, names); // names --> molids index.clear(); for (auto &g : spc.groups) if (!g.atomic) if (std::find(ids.begin(), ids.end(), g.id)!=ids.end() ) index.push_back( &g-&spc.groups.front() ); if (repeat<0) repeat = index.size(); } void findCluster(Tspace &spc, size_t first, std::set<size_t>& cluster) { std::set<size_t> pool(index.begin(), index.end()); cluster.clear(); cluster.insert(first); pool.erase(first); size_t n; do { // find cluster (not very clever...) 
n = cluster.size(); for (size_t i : cluster) if (!spc.groups[i].empty()) // check if group is inactive for (size_t j : pool) if (!spc.groups[j].empty()) // check if group is inactive if (i!=j) if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)<=thresholdsq) { cluster.insert(j); pool.erase(j); } } while (cluster.size()!=n); // check if cluster is too large double max = spc.geo.getLength().minCoeff()/2; for (auto i : cluster) for (auto j : cluster) if (j>i) if (spc.geo.sqdist(spc.groups[i].cm, spc.groups[j].cm)>=max*max) throw std::runtime_error(name+": cluster larger than half box length"); } void _move(Change &change) override { if (thresholdsq>0 && !index.empty()) { std::set<size_t> cluster; // all group index in cluster size_t first = *slump.sample(index.begin(), index.end()); // random molecule (nuclei) findCluster(spc, first, cluster); // find cluster around first N += cluster.size(); // average cluster size Change::data d; d.all=true; dp = 0.5*ranunit(slump).cwiseProduct(dir) * dptrans; angle = dprot * (slump()-0.5); Point COM = Geometry::trigoCom(spc, cluster); // cluster center Eigen::Quaterniond Q; Q = Eigen::AngleAxisd(angle, ranunit(slump)); // quaternion for (auto i : cluster) { // loop over molecules in cluster auto &g = spc.groups[i]; Geometry::rotate(g.begin(), g.end(), Q, spc.geo.boundaryFunc, -COM); g.cm = g.cm-COM; spc.geo.boundary(g.cm); g.cm = Q*g.cm+COM; spc.geo.boundary(g.cm); g.translate( dp, spc.geo.boundaryFunc ); d.index=i; change.groups.push_back(d); } _bias += 0; // one may add bias here... 
#ifndef NDEBUG Point newCOM = Geometry::trigoCom(spc, cluster); double _zero = std::sqrt( spc.geo.sqdist(COM,newCOM) ) - dp.norm(); if (fabs(_zero)>1) std::cerr << _zero << " "; #endif } } double bias(Change &change, double uold, double unew) override { return _bias; } //!< adds extra energy change not captured by the Hamiltonian void _reject(Change &change) override { msqd += 0; msqd_angle += 0; } void _accept(Change &change) override { msqd += dp.squaredNorm(); msqd_angle += angle*angle; } public: Cluster(Tspace &spc) : spc(spc) { cite = "doi:10/cj9gnn"; name = "cluster"; repeat = -1; // meaning repeat N times } }; template<typename Tspace> class Pivot : public Movebase { private: typedef typename Tspace::Tpvec Tpvec; std::vector<std::reference_wrapper<const Potential::BondData>> bonds; std::vector<int> index; // atom index to rotate Tspace& spc; std::string molname; int molid; double dprot; double d2; // cm movement, squared Average<double> msqd; // cm mean squared displacement void _to_json(json &j) const override { using namespace u8; j = { {"molecule", molname}, {"dprot", dprot}, {u8::rootof + u8::bracket("r_cm" + u8::squared), std::sqrt(msqd.avg())} }; _roundjson(j,3); } void _from_json(const json &j) override { dprot = j.at("dprot"); molname = j.at("molecule"); auto it = findName(molecules<Tpvec>, molname); if (it == molecules<Tpvec>.end()) throw std::runtime_error("unknown molecule '" + molname + "'"); molid = it->id(); bonds = Potential::filterBonds( molecules<Tpvec>[molid].bonds, Potential::BondData::harmonic); if (repeat<0) { auto v = spc.findMolecules(molid); repeat = std::distance(v.begin(), v.end()); // repeat for each molecule... 
if (repeat>0) repeat *= bonds.size(); } } void _move(Change &change) override { d2=0; if (std::fabs(dprot)>1e-9) { auto it = spc.randomMolecule(molid, slump); if (it!=spc.groups.end()) if (it->size()>2) { auto b = slump.sample(bonds.begin(), bonds.end()); // random harmonic bond if (b != bonds.end()) { int i1 = b->get().index.at(0); int i2 = b->get().index.at(1); int offset = std::distance( spc.p.begin(), it->begin() ); index.clear(); if (slump()>0.0) for (size_t i=i2+1; i<it->size(); i++) index.push_back(i+offset); else for (int i=0; i<i1; i++) index.push_back(i+offset); i1+=offset; i2+=offset; if (!index.empty()) { Point oldcm = it->cm; it->unwrap(spc.geo.distanceFunc); // remove pbc Point u = (spc.p[i1].pos - spc.p[i2].pos).normalized(); double angle = dprot * (slump()-0.5); Eigen::Quaterniond Q( Eigen::AngleAxisd(angle, u) ); auto M = Q.toRotationMatrix(); for (auto i : index) { spc.p[i].rotate(Q, M); // internal rot. spc.p[i].pos = Q * ( spc.p[i].pos - spc.p[i1].pos) + spc.p[i1].pos; // positional rot. } it->cm = Geometry::massCenter(it->begin(), it->end()); it->wrap(spc.geo.boundaryFunc); // re-apply pbc d2 = spc.geo.sqdist(it->cm, oldcm); // CM movement Change::data d; d.index = Faunus::distance( spc.groups.begin(), it ); // integer *index* of moved group d.all = true; // *all* atoms in group were moved change.groups.push_back( d ); // add to list of moved groups } } } } } void _accept(Change &change) override { msqd += d2; } void _reject(Change &change) override { msqd += 0; } public: Pivot(Tspace &spc) : spc(spc) { name = "pivot"; repeat = -1; // --> repeat=N } }; //!< Pivot move around random harmonic bond axis #ifdef ENABLE_MPI /** * @brief Class for parallel tempering (aka replica exchange) using MPI * * Although not completely correct, the recommended way of performing a temper move * is to do `N` Monte Carlo passes with regular moves and then do a tempering move. 
* This is because the MPI nodes must be in sync and if you have a system where
         * the random number generator calls are influenced by the Hamiltonian we could
         * end up in a deadlock.
         *
         * @date Lund 2012, 2018
         */
        template<class Tspace>
            class ParallelTempering : public Movebase {
                private:
                    typedef typename Tspace::Tpvec Tpvec;
                    typedef typename Tspace::Tparticle Tparticle;
                    Tspace& spc; // Space to operate on
                    MPI::MPIController& mpi;
                    int partner;               //!< Exchange replica (partner)
                    enum extradata {VOLUME=0}; //!< Structure of extra data to send
                    std::map<std::string, Average<double>> accmap; // acceptance statistics, keyed on partner pair

                    MPI::FloatTransmitter ft;          //!< Class for transmitting floats over MPI
                    MPI::ParticleTransmitter<Tpvec> pt;//!< Class for transmitting particles over MPI

                    // Neighboring rank (+/-1), chosen so both partners pick each other.
                    void findPartner() {
                        int dr=0;
                        partner = mpi.rank();
                        (mpi.random()>0.5) ? dr++ : dr--;
                        (mpi.rank() % 2 == 0) ? partner+=dr : partner-=dr;
                    } //!< Find replica to exchange with

                    bool goodPartner() {
                        assert(partner!=mpi.rank() && "Selfpartner! This is not supposed to happen.");
                        if (partner>=0)
                            if ( partner<mpi.nproc() )
                                if ( partner!=mpi.rank() )
                                    return true;
                        return false;
                    } //!< Is partner valid?

                    void _to_json(json &j) const override {
                        j = {
                            { "replicas", mpi.nproc() },
                            { "datasize", pt.getFormat() }
                        };
                        json &_j = j["exchange"];
                        _j = json::object();
                        for (auto &m : accmap)
                            _j[m.first] = {
                                {"attempts", m.second.cnt},
                                {"acceptance", m.second.avg()}
                            };
                    }

                    // Swap the full particle vector (and volume) with the partner replica.
                    void _move(Change &change) override {
                        double Vold = spc.geo.getVolume();
                        findPartner();
                        Tpvec p; // temporary storage
                        p.resize(spc.p.size());
                        if (goodPartner()) {
                            change.all=true;
                            pt.sendExtra[VOLUME]=Vold;     // copy current volume for sending
                            pt.recv(mpi, partner, p);      // receive particles
                            pt.send(mpi, spc.p, partner);  // send everything
                            pt.waitrecv();
                            pt.waitsend();

                            double Vnew = pt.recvExtra[VOLUME];
                            if (Vnew<1e-9 || spc.p.size() != p.size())
                                MPI_Abort(mpi.comm, 1); // unrecoverable transmission error

                            if (std::fabs(Vnew-Vold)>1e-9)
                                change.dV=true;

                            spc.p = p;
                            spc.geo.setVolume(Vnew);

                            // update mass centers
                            for (auto& g : spc.groups)
                                if (g.atomic==false)
                                    g.cm = Geometry::massCenter(g.begin(), g.end(),
                                            spc.geo.boundaryFunc, -g.begin()->pos);
                        }
                    }

                    double exchangeEnergy(double mydu) {
                        std::vector<MPI::FloatTransmitter::floatp> duSelf(1), duPartner;
                        duSelf[0]=mydu;
                        duPartner = ft.swapf(mpi, duSelf, partner);
                        return duPartner.at(0); // return partner energy change
                    } //!< Exchange energy with partner

                    double bias(Change &change, double uold, double unew) override {
                        return exchangeEnergy(unew-uold); // Exchange dU with partner (MPI)
                    }

                    std::string id() {
                        std::ostringstream o;
                        if (mpi.rank() < partner)
                            o << mpi.rank() << " <-> " << partner;
                        else
                            o << partner << " <-> " << mpi.rank();
                        return o.str();
                    } //!< Unique string to identify set of partners

                    void _accept(Change &change) override {
                        if ( goodPartner() )
                            accmap[ id() ] += 1;
                    }

                    void _reject(Change &change) override {
                        if ( goodPartner() )
                            accmap[ id() ] += 0;
                    }

                    void _from_json(const json &j) override {
                        pt.setFormat( j.value("format", std::string("XYZQI") ) );
                    }

                public:
                    ParallelTempering(Tspace &spc, MPI::MPIController &mpi ) : spc(spc), mpi(mpi) {
                        name="temper";
                        partner=-1;
                        pt.recvExtra.resize(1);
                        pt.sendExtra.resize(1);
                    }
            };
#endif

        /**
         * @brief Owns all configured moves and samples one at random,
         * weighted by each move's `repeat` value.
         */
        template<typename Tspace>
            class Propagator : public BasePointerVector<Movebase> {
                private:
                    int _repeat; // total number of weighted move attempts per sweep
                    std::discrete_distribution<> dist;
                    std::vector<double> w; // list of weights for each move

                    // Append a weight and rebuild the sampling distribution and
                    // the total repeat count.
                    void addWeight(double weight=1) {
                        w.push_back(weight);
                        dist = std::discrete_distribution<>(w.begin(), w.end());
                        _repeat = int(std::accumulate(w.begin(), w.end(), 0.0));
                    }

                public:
                    using BasePointerVector<Movebase>::vec;

                    inline Propagator() {}

                    inline Propagator(const json &j, Tspace &spc, MPI::MPIController &mpi) {
                        if (j.count("random")==1)
                            Movebase::slump = j["random"]; // slump is static --> shared for all moves

                        for (auto &m : j.at("moves")) {// loop over move list
                            size_t oldsize = vec.size();
                            for (auto it=m.begin(); it!=m.end(); ++it) {
                                try {
#ifdef ENABLE_MPI
                                    if (it.key()=="temper") this->template push_back<Move::ParallelTempering<Tspace>>(spc, mpi);
#endif
                                    if (it.key()=="moltransrot") this->template push_back<Move::TranslateRotate<Tspace>>(spc);
                                    if (it.key()=="quadrantjump") this->template push_back<Move::QuadrantJump<Tspace>>(spc);
                                    if (it.key()=="transrot") this->template push_back<Move::AtomicTranslateRotate<Tspace>>(spc);
                                    if (it.key()=="pivot") this->template push_back<Move::Pivot<Tspace>>(spc);
                                    if (it.key()=="volume") this->template push_back<Move::VolumeMove<Tspace>>(spc);
                                    if (it.key()=="speciation") this->template push_back<Move::SpeciationMove<Tspace>>(spc);
                                    if (it.key()=="cluster") this->template push_back<Move::Cluster<Tspace>>(spc);

                                    // exactly one new move was appended -> configure it
                                    if (vec.size()==oldsize+1) {
                                        vec.back()->from_json( it.value() );
                                        addWeight(vec.back()->repeat);
                                    } else
                                        std::cerr << "warning: ignoring unknown move '" << it.key() << "'" << endl;
                                } catch (std::exception &e) {
                                    throw std::runtime_error("Error adding move '" + it.key() + "': " + e.what());
                                }
                            }
                        }
                    }

                    int repeat() { return _repeat; }

                    auto sample() {
                        if (!vec.empty()) {
                            assert(w.size() == vec.size());
                            return vec.begin() + dist( Move::Movebase::slump.engine );
                        }
                        return vec.end();
                    } //!< Pick move from a
weighted, random distribution }; }//Move namespace template<class Tgeometry, class Tparticle> class MCSimulation { private: typedef Space<Tgeometry, Tparticle> Tspace; typedef typename Tspace::Tpvec Tpvec; bool metropolis(double du) const { if (std::isnan(du)) return false; if (du<0) return true; return ( Move::Movebase::slump() > std::exp(-du)) ? false : true; } //!< Metropolis criterion (true=accept) struct State { Tspace spc; Energy::Hamiltonian<Tspace> pot; State(const json &j) : spc(j), pot(spc,j) {} void sync(State &other, Change &change) { spc.sync( other.spc, change ); pot.sync( &other.pot, change ); } }; //!< Contains everything to describe a state State state1, // old state state2; // new state (trial); double uinit=0, dusum=0; Average<double> uavg; void init() { state1.pot.key = Energy::Energybase::OLD; // this is the old energy (current) state2.pot.key = Energy::Energybase::NEW; // this is the new energy (trial) state1.pot.init(); state2.pot.init(); dusum=0; Change c; c.all=true; state2.sync(state1, c); uinit = state1.pot.energy(c); // Hack in reference to state1 in speciation for (auto base : moves.vec) { auto derived = std::dynamic_pointer_cast<Move::SpeciationMove<Tspace>>(base); if (derived) derived->setOther(state1.spc); } assert(state1.pot.energy(c) == state2.pot.energy(c)); } public: Move::Propagator<Tspace> moves; auto& pot() { return state1.pot; } auto& space() { return state1.spc; } const auto& pot() const { return state1.pot; } const auto& space() const { return state1.spc; } const auto& geometry() const { return state1.spc.geo; } const auto& particles() const { return state1.spc.p; } double drift() { Change c; c.all=true; double ufinal = state1.pot.energy(c); return ( ufinal-(uinit+dusum) ) / uinit; } //!< Calculates the relative energy drift from initial configuration MCSimulation(const json &j, MPI::MPIController &mpi) : state1(j), state2(j), moves(j, state2.spc, mpi) { init(); } void store(json &j) const { j = state1.spc; j["random-move"] 
= Move::Movebase::slump; j["random-global"] = Faunus::random; } // store system to json object void restore(const json &j) { state1.spc = j; state2.spc = j; Move::Movebase::slump = j["random-move"]; // restore move random number generator Faunus::random = j["random-global"]; // restore global random number generator //reactions<Tpvec> = j.at("reactionlist").get<decltype(reactions<Tpvec>)>(); // should be handled by space init(); } //!< restore system from previously store json object void move() { Change change; for (int i=0; i<moves.repeat(); i++) { auto mv = moves.sample(); // pick random move if (mv != moves.end() ) { change.clear(); (**mv).move(change); if (!change.empty()) { double unew, uold, du; #pragma omp parallel sections { #pragma omp section { unew = state2.pot.energy(change); } #pragma omp section { uold = state1.pot.energy(change); } } du = unew - uold; double bias = (**mv).bias(change, uold, unew) + Nchem( state2.spc, state1.spc , change); if ( metropolis(du + bias) ) { // accept move state1.sync( state2, change ); (**mv).accept(change); } else { // reject move state2.sync( state1, change ); (**mv).reject(change); du=0; } dusum+=du; // sum of all energy changes } } } } void to_json(json &j) { j = state1.spc.info(); j["temperature"] = pc::temperature / 1.0_K; j["moves"] = moves; j["energy"].push_back(state1.pot); } }; template<class Tgeometry, class Tparticle> void to_json(json &j, MCSimulation<Tgeometry,Tparticle> &mc) { mc.to_json(j); } /** * @brief add documentation..... * * @f[ * \beta U = \ln ( \sum N_o!/N_n! 
\exp([N_n - N_o]\beta \mu) V^{N_n - N_o} ) * @f] * * @todo * - Rename to something more descriptive * - use exception message to suggest how to fix the problem */ template<typename Tspace> double Nchem( Tspace &spc_n, Tspace &spc_o, const Change &change) { double NoverO=0; if ( change.dNpart ) {// Have the number of any molecules changed for ( auto &m : change.groups ) { int N_o = 0; int N_n = 0; if ( !m.dNpart && !molecules<std::vector<typename Tspace::Tparticle>>[ spc_n.groups[m.index].id ].atomic) { // Molecular species auto mollist_n = spc_n.findMolecules(m.index, Tspace::ACTIVE); auto mollist_o = spc_o.findMolecules(m.index, Tspace::ACTIVE); N_n=size(mollist_n); N_o=size(mollist_o); } if ( m.dNpart ) { auto mollist_n = spc_n.findMolecules(spc_n.groups[m.index].id, Tspace::ALL); if ( size(mollist_n) > 1 ) throw std::runtime_error("Bad definition: One group per atomic molecule!"); auto mollist_o = spc_o.findMolecules(spc_o.groups[m.index].id, Tspace::ALL); if ( size(mollist_o) > 1 ) throw std::runtime_error("Bad definition: One group per atomic molecule!"); // Below is safe due to the catches above // add consistency criteria with m.atoms.size() == N N_n = mollist_n.begin()->size(); N_o = mollist_o.begin()->size(); } int dN = N_n - N_o; if (dN!=0) { double V_n = spc_n.geo.getVolume(); double V_o = spc_o.geo.getVolume(); double betamu = molecules<std::vector<typename Tspace::Tparticle>>[ spc_n.groups[m.index].id ].activity; if (std::fabs(betamu) < 1e-20) betamu = std::log( betamu / 1.0_molar ); if (dN>0) for (int n=0; n < dN; n++) NoverO += -std::log( (N_o + 1 + n) / ( V_n * 1.0_molar )) + betamu; else if (dN<0) for (int n=0; n < (-dN); n++) NoverO += std::log( (N_o - n) / ( V_n * 1.0_molar )) - betamu; } } } return -NoverO; // negative sign since Pref exp{-beta(dU)} = exp{-beta(dU -ln(Pref)} } }//Faunus namespace
vla-5.c
// { dg-do compile } /* { dg-require-effective-target alloca } */ void foo(int n, int i) { int A[n]; #pragma omp parallel sections lastprivate(A) { A[i] = 1; } }
GB_unaryop__identity_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator/ templates instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_uint32
// op(A') function:  GB_tran__identity_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals the cast input)
#define GB_OP(z, x) \
    z = x ;

// casting (any nonzero uint32_t becomes true)
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise, embarrassingly parallel over the anz entries of Ax;
// each Cx [p] depends only on Ax [p], hence the static OpenMP schedule.
GrB_Info GB_unop__identity_bool_uint32
(
    bool *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose logic lives in the shared template GB_unaryop_transpose.c,
// instantiated here with the macros defined above (phase 2 of 2).
GrB_Info GB_tran__identity_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__isle_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator/ templates instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isle_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__isle_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__isle_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__isle_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isle_uint8)
// A*D function (colscale):         GB (_AxD__isle_uint8)
// D*A function (rowscale):         GB (_DxB__isle_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__isle_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__isle_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isle_uint8)
// C=scalar+B                       GB (_bind1st__isle_uint8)
// C=scalar+B'                      GB (_bind1st_tran__isle_uint8)
// C=A+scalar                       GB (_bind2nd__isle_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__isle_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern?  0
// B type:   uint8_t
// B pattern?  0

// BinaryOp: cij = (aij <= bij)
// ISLE: "is less-or-equal"; the comparison result (0 or 1) is stored as
// uint8_t, the same type as the inputs.

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for ISLE: this kernel exists only when the op is one of
// MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV, hence the #if 0.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isle_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return, emitted by the generator.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isle_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint8_t alpha_scalar ;
    uint8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion substitutes alpha/beta for entries missing from A or B
        alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isle_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isle_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isle_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 64 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 16 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
GB_unaryop__abs_fp64_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp64_int16
// op(A') function:  GB_tran__abs_fp64_int16

// C type:   double
// A type:   int16_t
// cast:     double cij = (double) aij
// unaryop:  cij = fabs (aij)

// type of the A matrix entries
#define GB_ATYPE \
    int16_t

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int16_t aij = Ax [pA]

// access the p-th entry of the C output array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CASTING(z, aij) \
    double z = (double) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries: Cx [p] = fabs ((double) Ax [p]),
// parallelized with a statically-scheduled OpenMP loop over nthreads
// threads.  Returns GrB_NO_VALUE when this operator/type combination is
// compiled out via GB_DISABLE.
GrB_Info GB_unop__abs_fp64_int16
(
    double *Cx,             // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including
// GB_unaryop_transpose.c with the macros defined above (phase 2 of 2).
GrB_Info GB_tran__abs_fp64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
correlationOMP.c
/** @file @author DINH Viet Huy Hubert <dinh@ifrec.osaka-u.ac.jp> @version 1.0 @section DESCRIPTION Calculate correlation values of vectors against each others in a list of vectors **/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <omp.h> #include "correlation.h" static size_t getIndex(const size_t x,const size_t y,const size_t n) { size_t k = ( n * ( n - 1 ) / 2 ) - ( ( n - x ) * ( n - x - 1 ) / 2 ) + y - x - 1; return k; } extern float* getCorrelation(const float* vectors,const size_t cols,const size_t rows) { /* Correlation calculation derived from the R project implementation */ float preSum[rows]; /* Precomputed vectors terms sum */ float preS23[rows]; /* Precomputed S2 S3 values */ float fCols= cols; fprintf(stderr,"start\n"); fprintf(stderr,"precomputing sums\n"); #pragma omp parallel for for(size_t i=0;i<rows;i++) { /* precomputing sums */ size_t base=i*cols; float sum=0.0; float sum2=0.0; for(size_t j=0;j<cols;j++) { float x=vectors[base+j]; sum += x; sum2 += x*x; } preSum[i]=sum; preS23[i]=(sum2*fCols) - (sum*sum); } fprintf(stderr, "done\n"); fprintf(stderr, "calculate correlation values\n"); size_t totalSize=(rows)*(rows-1)/2; /* number of value in the flat vector storage of the triangle matrix*/ fprintf(stderr,"%zu (%zu)\n",totalSize,totalSize*sizeof(float)); float *output=malloc(sizeof(float)*totalSize); assert(output!=NULL); if(output==NULL) { fprintf(stderr,"not enough memory\n"); abort(); //return(NULL); } //size_t index=0; /* position set in the vector to write to */ #pragma omp parallel for for(size_t i=0;i<rows;i++) { size_t baseX=i*cols; float sumX = preSum[i]; float s2=preS23[i]; /*#pragma master { fprintf(stderr,"%d threads \n",omp_get_num_threads()); }*/ #pragma omp parallel for for(size_t j=i+1;j<rows;j++) { size_t baseY=j*cols; float sumXY = 0.0; for(size_t k=0;k<cols;k++) { float a=vectors[baseX+k]; float b=vectors[baseY+k]; sumXY += a*b; } float sumY = preSum[j]; float s3 = preS23[j]; float s1 = 
(sumXY*fCols) - (sumX*sumY); float s4 = sqrtf(s2*s3); float correlation = s1/s4; //assert(index<totalSize); /*if(index % (totalSize/100) == 1) { fprintf(stderr,"%zu%%\r",100*index/totalSize); }*/ size_t index=getIndex(i,j,rows); //#pragma omp critical output[index] = correlation; } } fprintf(stderr,"\nend\n"); return(output); }
GB_unaryop__identity_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__identity_fp32_fp32 // op(A') function: GB_tran__identity_fp32_fp32 // C type: float // A type: float // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__identity_fp32_fp32 ( float *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__identity_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
sudoku-omp-priority.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <omp.h>
#include <math.h>

// get the size of elements on an array
#define NELEMS(x) (sizeof(x) / sizeof((x)[0]))

typedef int bool;
#define false 0
#define true 1

/*
 * One search state: a (partially filled) board plus the book-keeping
 * needed to order states in the priority queue.
 */
struct Puzzle {
    int row;              /* row of the most recently filled cell */
    int col;              /* column of the most recently filled cell */
    int depth;            /* number of cells filled so far (root state = 1) */
    long double priority; /* best-first ordering key; lower is explored first */
    int root_n;           /* sub-grid dimension (square root of n) */
    int n;                /* board dimension */
    int ** matrix;        /* n x n board; 0 marks an empty cell */
};
typedef struct Puzzle Puzzle;

static int _states_searched_ = 0;

///////////////////////////////////////////////////////////
//// Priority Queue (singly linked list kept sorted by priority)
///////////////////////////////////////////////////////////

// Queue node: holds one search state.
typedef struct node {
    Puzzle * puzzle;
    struct node* next;
} Node;

/**
 * Allocate a new queue node wrapping the given state.
 * @param puzzle State the node will hold.
 * @return Newly allocated node; exits the program on allocation failure.
 */
Node* newNode(Puzzle * puzzle)
{
    Node* temp = (Node*)malloc(sizeof(Node));
    if (temp == NULL) {
        fprintf(stderr, "ERROR: Out of memory.\n");
        exit(EXIT_FAILURE);
    }
    temp->puzzle = puzzle;
    temp->next = NULL;
    return temp;
}

/**
 * Return the state at the head of the queue.
 * Precondition: the queue is non-empty.
 * @param head The head of the queue.
 * @return A pointer to the highest-priority Puzzle.
 */
Puzzle * peek(Node** head)
{
    return (*head)->puzzle;
}

/**
 * Remove the element with the highest priority (the head node).
 * Precondition: the queue is non-empty. The Puzzle itself is NOT freed;
 * ownership passes to the caller.
 * @param head The head of the queue.
 */
void pop(Node** head)
{
    Node* temp = *head;
    (*head) = (*head)->next;
    free(temp);
}

/**
 * Insert a state keeping the list sorted by ascending priority.
 * Precondition: the queue is non-empty (callers create the first node
 * with newNode()).
 * @param head   The head of the queue.
 * @param puzzle State to insert.
 */
void push(Node** head, Puzzle * puzzle)
{
    Node* start = (*head);
    Node* temp = newNode(puzzle);

    if ((*head)->puzzle->priority > puzzle->priority) {
        /* New node goes before the current head. */
        temp->next = *head;
        (*head) = temp;
    } else {
        /* Walk to the first node with a larger (or equal) priority. */
        while (start->next != NULL &&
               start->next->puzzle->priority < puzzle->priority) {
            start = start->next;
        }
        /* Insert either at the end of the list or at the found position. */
        temp->next = start->next;
        start->next = temp;
    }
}

/**
 * Check whether the queue is empty.
 * @param head The head of the queue.
 * @return true when the queue holds no elements.
 */
int isEmpty(Node** head)
{
    return (*head) == NULL;
}

///////////////////////////////////////////////////////////
//// Global Variables
///////////////////////////////////////////////////////////

/* Written inside the parallel region, read in every worker's spin loop;
 * volatile keeps the compiler from caching the flag in a register. */
volatile bool solution_found = false;
/* Shared work queue; all access is serialized with omp critical. */
Node* queue;

/**
 * Check if a number appears more than once in a sub-grid.
 * The candidate value is already placed in the matrix, so a single
 * occurrence is the candidate itself and only count > 1 is a conflict.
 * @param puzzle Sudoku puzzle data structure.
 * @param row Top row of the sub-grid.
 * @param col Leftmost column of the sub-grid.
 * @param num Comparison value.
 * @return Returns true if the number repeats inside the sub-grid.
 */
bool check_grid(Puzzle * puzzle, int row, int col, int num)
{
    int i, j;
    int count = 0;
    for (i = 0; i < puzzle->root_n; ++i) {
        for (j = 0; j < puzzle->root_n; ++j) {
            if (puzzle->matrix[i + row][j + col] == num) {
                count++;
                if (count > 1) {
                    return true;
                }
            }
        }
    }
    return false;
}

/**
 * Check if a number appears more than once in a column.
 * @param puzzle Sudoku puzzle data structure.
 * @param col Column.
 * @param num Comparison value.
 * @return Returns true if the number repeats in the column.
 */
bool check_column(Puzzle * puzzle, int col, int num)
{
    int i;
    int count = 0;
    for (i = 0; i < puzzle->n; ++i) {
        if (puzzle->matrix[i][col] == num) {
            count++;
            if (count > 1) {
                return true;
            }
        }
    }
    return false;
}

/**
 * Check if a number appears more than once in a row.
 * @param puzzle Sudoku puzzle data structure.
 * @param row Row.
 * @param num Comparison value.
 * @return Returns true if the number repeats in the row.
 */
bool check_row(Puzzle * puzzle, int row, int num)
{
    int i;
    int count = 0;
    for (i = 0; i < puzzle->n; ++i) {
        if (puzzle->matrix[row][i] == num) {
            count++;
            if (count > 1) {
                return true;
            }
        }
    }
    return false;
}

/**
 * Check whether the value already placed at (row, col) obeys sudoku rules.
 * @param puzzle A pointer to a Sudoku puzzle data structure.
 * @param row Row.
 * @param col Column.
 * @param num Comparison value (the value placed at (row, col)).
 * @return Returns true if the placement is valid.
 */
bool is_valid(Puzzle * puzzle, int row, int col, int num)
{
    return !(check_row(puzzle, row, num)) &&
           !(check_column(puzzle, col, num)) &&
           !(check_grid(puzzle,
                        row - row % puzzle->root_n,
                        col - col % puzzle->root_n, num));
}

/**
 * Print the puzzle matrix (serialized so parallel callers do not interleave).
 * @param puzzle A pointer to a Sudoku puzzle data structure.
 */
void debug_puzzle(Puzzle * puzzle)
{
    #pragma omp critical
    {
        if (puzzle != NULL) {
            int n = puzzle->n;
            int slots = (n + n - 1);
            char buffer[slots + 1];
            memset(buffer, '-', slots); /* init all positions with '-' */
            buffer[slots] = '\0';       /* define end of string */

            printf("%s\n", buffer);
            printf("Puzzle:\n");
            printf("-depth: %d\n", puzzle->depth);
            printf("-row: %d\n", puzzle->row);
            printf("-col: %d\n", puzzle->col);
            printf("%s\n", buffer);

            int i, j;
            for (i = 0; i < n; ++i) {
                for (j = 0; j < n; ++j) {
                    printf("%d ", puzzle->matrix[i][j]);
                }
                printf("\n");
            }
            printf("%s\n", buffer);
        }
    }
}

/**
 * Deep-copy a search state (metadata plus the full board matrix).
 * @param puzzle State to copy; may be NULL.
 * @return A newly allocated copy, or NULL if puzzle was NULL.
 */
Puzzle * copy(Puzzle * puzzle)
{
    if (puzzle == NULL) {
        return NULL;
    }

    Puzzle * copyPuzzle = malloc(sizeof(Puzzle));
    copyPuzzle->depth = puzzle->depth;
    copyPuzzle->priority = puzzle->priority;
    copyPuzzle->row = puzzle->row;
    copyPuzzle->col = puzzle->col;
    copyPuzzle->root_n = puzzle->root_n;
    copyPuzzle->n = puzzle->n;
    copyPuzzle->matrix = (int**) malloc(puzzle->n * sizeof(int*));

    int i, j;
    for (i = 0; i < puzzle->n; ++i) {
        copyPuzzle->matrix[i] = (int *) malloc(puzzle->n * sizeof(int));
        for (j = 0; j < puzzle->n; ++j) {
            copyPuzzle->matrix[i][j] = puzzle->matrix[i][j];
        }
    }
    return copyPuzzle;
}

/**
 * Find the first empty cell (value 0) in row-major order.
 * @param puzzle A pointer to a Sudoku puzzle data structure.
 * @param row Out: row of the empty cell.
 * @param col Out: column of the empty cell.
 * @return Returns true if the puzzle has an empty position.
 */
bool find_empty(Puzzle * puzzle, int * row, int * col)
{
    for (*row = 0; *row < puzzle->n; (*row)++) {
        for (*col = 0; *col < puzzle->n; (*col)++) {
            if (puzzle->matrix[*row][*col] == 0) {
                return true;
            }
        }
    }
    return false;
}

/**
 * Generate the successors of a state: one new state per candidate value
 * (1..n) for the first empty cell, each pushed onto the shared queue.
 * @param puzzle Sudoku puzzle data structure.
 * @return Returns true if there were successors, false when the board is
 *         already full (i.e. this state is a complete candidate solution).
 */
bool generateSucessors(Puzzle * puzzle)
{
    int n = puzzle->n;
    int row, col, i;

    if (!find_empty(puzzle, &row, &col)) {
        return false;
    }

    for (i = 1; i <= n; i++) {
        Puzzle * sucessor = copy(puzzle);
        sucessor->matrix[row][col] = i;
        sucessor->depth = puzzle->depth + 1;
        sucessor->row = row;
        sucessor->col = col;
        /* Weight shallower decisions exponentially more so the queue
         * expands states in an ordered, depth-first-like fashion. */
        long double offset = powl((long double) n,
                                  (long double) n * n + 2 - sucessor->depth);
        sucessor->priority = ((long double) i) * offset + puzzle->priority;

        #pragma omp critical
        {
            if (queue == NULL) {
                queue = newNode(sucessor);
            } else {
                push(&queue, sucessor);
            }
        }
    }
    return true;
}

/**
 * Free a Sudoku puzzle structure, including its board matrix.
 * @param puzzle A pointer to a Sudoku puzzle data structure; may be NULL.
 */
void cleanPuzzle(Puzzle * puzzle)
{
    if (puzzle != NULL) {
        int n = puzzle->n;
        int i;
        for (i = 0; i < n; i++) {
            free(puzzle->matrix[i]);
        }
        free(puzzle->matrix);
        free(puzzle);
    }
}

/**
 * Solve a Sudoku puzzle in parallel: each thread repeatedly pops the
 * best state from the shared priority queue, validates it, and expands
 * it, until some thread finds a fully filled valid board.
 * Note: any states still queued when a solution is found are leaked
 * intentionally (releasing the queue was measured to be slow).
 * @param puzzle A pointer to the root Sudoku puzzle data structure;
 *               ownership is transferred (it is freed during the search).
 */
void solve(Puzzle * puzzle)
{
    queue = newNode(puzzle);

    #pragma omp parallel
    {
        do {
            Puzzle * current;

            #pragma omp critical
            {
                if (!isEmpty(&queue)) {
                    current = peek(&queue);
                    pop(&queue);
                    _states_searched_++;
                } else {
                    current = NULL;
                }
            }

            if (current != NULL) {
                int row = current->row;
                int col = current->col;
                /* The root state (depth 1) has not placed anything yet,
                 * so it skips validation. */
                bool isValid = is_valid(current, row, col,
                                        current->matrix[row][col]);

                if (current->depth == 1 || isValid) {
                    bool hasSucessors = generateSucessors(current);
                    /* No successors means no empty cell: a solution. */
                    if (!hasSucessors) {
                        solution_found = true;
                        printf("--------SOLUTION--------\n");
                        debug_puzzle(current);
                    }
                }
                cleanPuzzle(current);
            }
        } while (!solution_found);
    }
}

int main(int argc, char *argv[])
{
    double start = omp_get_wtime();

    FILE * file_input;
    char * filename;

    /* Exactly one argument (the puzzle file path) is required. */
    if (argc > 2) {
        printf("ERROR: Too many arguments.\n");
        exit(EXIT_FAILURE);
    } else if (argc < 2) {
        printf("ERROR: Missing arguments.\n");
        exit(EXIT_FAILURE);
    }

    filename = argv[1];

    if ((file_input = fopen(filename, "r")) == NULL) {
        printf("ERROR: Could not open file %s\n", filename);
        exit(EXIT_FAILURE);
    }

    /* Number of rows and columns, and its square root. */
    int n;
    int root_n;

    /* First line of the file holds the sub-grid dimension. */
    if (fscanf(file_input, "%d\n", &root_n) != 1 || root_n <= 0) {
        printf("ERROR: Invalid puzzle header in %s\n", filename);
        fclose(file_input);
        exit(EXIT_FAILURE);
    }
    n = root_n * root_n;

    /* Build the root search state. */
    Puzzle * puzzle = malloc(sizeof(Puzzle));
    puzzle->n = n;
    puzzle->root_n = root_n;
    puzzle->row = 0;
    puzzle->col = 0;
    puzzle->depth = 1;
    puzzle->priority = 0;
    puzzle->matrix = (int**) malloc(n * sizeof(int*));

    int i, j;
    for (i = 0; i < n; ++i) {
        puzzle->matrix[i] = (int *) malloc(n * sizeof(int));
    }

    /* Read the n x n board from the file. */
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            if (fscanf(file_input, "%d", &puzzle->matrix[i][j]) != 1) {
                printf("ERROR: Invalid puzzle body in %s\n", filename);
                fclose(file_input);
                exit(EXIT_FAILURE);
            }
        }
    }

    fclose(file_input);

    solve(puzzle);

    double end = omp_get_wtime();
    printf("Searched %d states in total.\n", _states_searched_);
    printf("Elapsed time: %f (s)\n", end - start);

    return EXIT_SUCCESS;
}
radial_integrals.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file radial_integrals.h * * \brief Representation of various radial integrals. */ #ifndef __RADIAL_INTEGRALS_H__ #define __RADIAL_INTEGRALS_H__ #include "sbessel.h" namespace sirius { class Radial_integrals { private: /// Basic parameters. Simulation_parameters const& param_; /// Unit cell. Unit_cell const& unit_cell_; /// Linear grid up to |G+k|_{max} Radial_grid grid_gkmax_; /// Linear grid up to |G|_{max} Radial_grid grid_gmax_; /// Linear grid up to |G|_{max} for radial integrals of local potential. Radial_grid grid_gmax_vloc_; mdarray<Spline<double>, 3> aug_radial_integrals_; /// Beta-projector radial integrals. 
mdarray<Spline<double>, 2> beta_radial_integrals_; mdarray<Spline<double>, 2> beta_djldq_radial_integrals_; mdarray<Spline<double>, 1> pseudo_core_radial_integrals_; mdarray<Spline<double>, 1> pseudo_rho_radial_integrals_; mdarray<Spline<double>, 1> vloc_radial_integrals_; inline void generate_pseudo_rho_radial_integrals() { PROFILE("sirius::Radial_integrals::generate_pseudo_rho_radial_integrals"); pseudo_rho_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); pseudo_rho_radial_integrals_(iat) = Spline<double>(grid_gmax_); Spline<double> rho(atom_type.radial_grid()); for (int ir = 0; ir < atom_type.num_mt_points(); ir++) { rho[ir] = atom_type.pp_desc().total_charge_density[ir]; } rho.interpolate(); #pragma omp parallel for for (int iq = 0; iq < grid_gmax_.num_points(); iq++) { Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_gmax_[iq]); pseudo_rho_radial_integrals_(iat)[iq] = sirius::inner(jl[0], rho, 0, atom_type.num_mt_points()) / fourpi; } pseudo_rho_radial_integrals_(iat).interpolate(); } } /// Generate radial integrals for local part of pseudopotential. /** See Potential::generate_local_potential() for more details. 
*/ inline void generate_vloc_radial_integrals() { PROFILE("sirius::Radial_integrals::generate_vloc_radial_integrals"); vloc_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); vloc_radial_integrals_(iat) = Spline<double>(grid_gmax_vloc_); #pragma omp parallel for for (int iq = 0; iq < grid_gmax_vloc_.num_points(); iq++) { Spline<double> s(atom_type.radial_grid()); if (iq == 0) { for (int ir = 0; ir < atom_type.num_mt_points(); ir++) { double x = atom_type.radial_grid(ir); s[ir] = (x * atom_type.pp_desc().vloc[ir] + atom_type.zn()) * x; } vloc_radial_integrals_(iat)[iq] = s.interpolate().integrate(0); } else { double g = grid_gmax_vloc_[iq]; double g2 = std::pow(g, 2); for (int ir = 0; ir < atom_type.num_mt_points(); ir++) { double x = atom_type.radial_grid(ir); s[ir] = (x * atom_type.pp_desc().vloc[ir] + atom_type.zn() * gsl_sf_erf(x)) * std::sin(g * x); } vloc_radial_integrals_(iat)[iq] = (s.interpolate().integrate(0) / g - atom_type.zn() * std::exp(-g2 / 4) / g2); } } vloc_radial_integrals_(iat).interpolate(); } } inline void generate_pseudo_core_radial_integrals() { PROFILE("sirius::Radial_integrals::generate_pseudo_core_radial_integrals"); pseudo_core_radial_integrals_ = mdarray<Spline<double>, 1>(unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); pseudo_core_radial_integrals_(iat) = Spline<double>(grid_gmax_); Spline<double> ps_core(atom_type.radial_grid()); for (int ir = 0; ir < atom_type.num_mt_points(); ir++) { ps_core[ir] = atom_type.pp_desc().core_charge_density[ir]; } ps_core.interpolate(); #pragma omp parallel for for (int iq = 0; iq < grid_gmax_.num_points(); iq++) { Spherical_Bessel_functions jl(0, atom_type.radial_grid(), grid_gmax_[iq]); pseudo_core_radial_integrals_(iat)[iq] = sirius::inner(jl[0], ps_core, 2, 
atom_type.num_mt_points()); } pseudo_core_radial_integrals_(iat).interpolate(); } } inline void generate_aug_radial_integrals() { PROFILE("sirius::Radial_integrals::generate_aug_radial_integrals"); int nmax = unit_cell_.max_mt_radial_basis_size(); int lmax = unit_cell_.lmax(); /* interpolate <j_{l_n}(q*x) | Q_{xi,xi'}^{l}(x) > with splines */ aug_radial_integrals_ = mdarray<Spline<double>, 3>(nmax * (nmax + 1) / 2, 2 * lmax + 1, unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); if (!atom_type.pp_desc().augment) { continue; } /* number of radial beta-functions */ int nbrf = atom_type.mt_radial_basis_size(); /* maximum l of beta-projectors */ int lmax_beta = atom_type.indexr().lmax(); for (int l = 0; l <= 2 * lmax_beta; l++) { for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) { aug_radial_integrals_(idx, l, iat) = Spline<double>(grid_gmax_); } } /* interpolate Q-operator radial functions */ mdarray<Spline<double>, 2> qrf_spline(nbrf * (nbrf + 1) / 2, 2 * lmax_beta + 1); #pragma omp parallel for for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) { for (int l3 = 0; l3 <= 2 * lmax_beta; l3++) { qrf_spline(idx, l3) = Spline<double>(atom_type.radial_grid()); for (int ir = 0; ir < atom_type.num_mt_points(); ir++) { qrf_spline(idx, l3)[ir] = atom_type.pp_desc().q_radial_functions_l(ir, idx, l3); } qrf_spline(idx, l3).interpolate(); } } #pragma omp parallel for for (int iq = 0; iq < grid_gmax_.num_points(); iq++) { Spherical_Bessel_functions jl(2 * lmax_beta, atom_type.radial_grid(), grid_gmax_[iq]); for (int l3 = 0; l3 <= 2 * lmax_beta; l3++) { for (int idxrf2 = 0; idxrf2 < nbrf; idxrf2++) { int l2 = atom_type.indexr(idxrf2).l; for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) { int l1 = atom_type.indexr(idxrf1).l; int idx = idxrf2 * (idxrf2 + 1) / 2 + idxrf1; if (l3 >= std::abs(l1 - l2) && l3 <= (l1 + l2) && (l1 + l2 + l3) % 2 == 0) { aug_radial_integrals_(idx, l3, iat)[iq] = 
sirius::inner(jl[l3], qrf_spline(idx, l3), 0, atom_type.num_mt_points()); } } } } } for (int l = 0; l <= 2 * lmax_beta; l++) { for (int idx = 0; idx < nbrf * (nbrf + 1) / 2; idx++) { aug_radial_integrals_(idx, l, iat).interpolate(); } } } } inline void generate_beta_radial_integrals() { PROFILE("sirius::Radial_integrals::generate_beta_radial_integrals"); /* create space for <j_l(qr)|beta> radial integrals */ beta_radial_integrals_ = mdarray<Spline<double>, 2>(unit_cell_.max_mt_radial_basis_size(), unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); int nrb = atom_type.mt_radial_basis_size(); for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { beta_radial_integrals_(idxrf, iat) = Spline<double>(grid_gkmax_); } /* interpolate beta radial functions */ std::vector<Spline<double>> beta_rf(nrb); for (int idxrf = 0; idxrf < nrb; idxrf++) { beta_rf[idxrf] = Spline<double>(atom_type.radial_grid()); int nr = atom_type.pp_desc().num_beta_radial_points[idxrf]; for (int ir = 0; ir < nr; ir++) { beta_rf[idxrf][ir] = atom_type.pp_desc().beta_radial_functions(ir, idxrf); } beta_rf[idxrf].interpolate(); } #pragma omp parallel for for (int iq = 0; iq < grid_gkmax_.num_points(); iq++) { Spherical_Bessel_functions jl(unit_cell_.lmax(), atom_type.radial_grid(), grid_gkmax_[iq]); for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { int l = atom_type.indexr(idxrf).l; int nr = atom_type.pp_desc().num_beta_radial_points[idxrf]; /* compute \int j_l(q * r) beta_l(r) r^2 dr */ /* remeber that beta(r) are defined as miltiplied by r */ beta_radial_integrals_(idxrf, iat)[iq] = sirius::inner(jl[l], beta_rf[idxrf], 1, nr); } } for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { beta_radial_integrals_(idxrf, iat).interpolate(); } } } inline void generate_beta_djldq_radial_integrals() { 
PROFILE("sirius::Radial_integrals::generate_beta_djldq_radial_integrals"); /* create space for <j_l(qr)|beta> radial integrals */ beta_djldq_radial_integrals_ = mdarray<Spline<double>, 2>(unit_cell_.max_mt_radial_basis_size(), unit_cell_.num_atom_types()); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { auto& atom_type = unit_cell_.atom_type(iat); int nrb = atom_type.mt_radial_basis_size(); for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { beta_djldq_radial_integrals_(idxrf, iat) = Spline<double>(grid_gkmax_); } /* interpolate beta radial functions */ std::vector<Spline<double>> beta_rf(nrb); for (int idxrf = 0; idxrf < nrb; idxrf++) { beta_rf[idxrf] = Spline<double>(atom_type.radial_grid()); int nr = atom_type.pp_desc().num_beta_radial_points[idxrf]; for (int ir = 0; ir < nr; ir++) { beta_rf[idxrf][ir] = atom_type.pp_desc().beta_radial_functions(ir, idxrf); } beta_rf[idxrf].interpolate(); } #pragma omp parallel for for (int iq = 0; iq < grid_gkmax_.num_points(); iq++) { Spherical_Bessel_functions jl(unit_cell_.lmax(), atom_type.radial_grid(), grid_gkmax_[iq]); for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { int l = atom_type.indexr(idxrf).l; int nr = atom_type.pp_desc().num_beta_radial_points[idxrf]; /* compute \int d (j_l(q*r) / dq) beta_l(r) r^2 dr */ /* remeber that beta(r) are defined as miltiplied by r */ auto s = jl.deriv_q(l); beta_djldq_radial_integrals_(idxrf, iat)[iq] = sirius::inner(s, beta_rf[idxrf], 1, nr); } } for (int idxrf = 0; idxrf < atom_type.mt_radial_basis_size(); idxrf++) { beta_djldq_radial_integrals_(idxrf, iat).interpolate(); } } } inline std::pair<int, double> iqdq_gkmax(double q__) const { std::pair<int, double> result; result.first = static_cast<int>((grid_gkmax_.num_points() - 1) * q__ / param_.gk_cutoff()); /* delta q = q - q_i */ result.second = q__ - grid_gkmax_[result.first]; return std::move(result); } inline std::pair<int, double> iqdq_gmax(double q__) const { 
std::pair<int, double> result; result.first = static_cast<int>((grid_gmax_.num_points() - 1) * q__ / param_.pw_cutoff()); /* delta q = q - q_i */ result.second = q__ - grid_gmax_[result.first]; return std::move(result); } inline std::pair<int, double> iqdq_gmax_vloc(double q__) const { std::pair<int, double> result; result.first = static_cast<int>((grid_gmax_vloc_.num_points() - 1) * q__ / param_.pw_cutoff()); /* delta q = q - q_i */ result.second = q__ - grid_gmax_vloc_[result.first]; return std::move(result); } public: /// Constructor. Radial_integrals(Simulation_parameters const& param__, Unit_cell const& unit_cell__) : param_(param__) , unit_cell_(unit_cell__) { grid_gmax_ = Radial_grid(linear_grid, static_cast<int>(20 * param_.pw_cutoff()), 0, param_.pw_cutoff()); grid_gkmax_ = Radial_grid(linear_grid, static_cast<int>(20 * param_.gk_cutoff()), 0, param_.gk_cutoff()); grid_gmax_vloc_ = Radial_grid(linear_grid, static_cast<int>(100 * param_.pw_cutoff()), 0, param_.pw_cutoff()); if (param_.esm_type() == electronic_structure_method_t::pseudopotential) { generate_beta_radial_integrals(); generate_beta_djldq_radial_integrals(); generate_aug_radial_integrals(); generate_pseudo_core_radial_integrals(); generate_pseudo_rho_radial_integrals(); generate_vloc_radial_integrals(); } } inline double beta_radial_integral(int idxrf__, int iat__, double q__) const { auto iqdq = iqdq_gkmax(q__); return beta_radial_integrals_(idxrf__, iat__)(iqdq.first, iqdq.second); } inline double beta_djldq_radial_integral(int idxrf__, int iat__, double q__) const { auto iqdq = iqdq_gkmax(q__); return beta_djldq_radial_integrals_(idxrf__, iat__)(iqdq.first, iqdq.second); } inline double aug_radial_integral(int idx__, int l__, int iat__, double q__) const { auto iqdq = iqdq_gmax(q__); return aug_radial_integrals_(idx__, l__, iat__)(iqdq.first, iqdq.second); } inline double pseudo_core_radial_integral(int iat__, double q__) const { auto iqdq = iqdq_gmax(q__); return 
pseudo_core_radial_integrals_(iat__)(iqdq.first, iqdq.second); } inline double pseudo_rho_radial_integral(int iat__, double q__) const { auto iqdq = iqdq_gmax(q__); return pseudo_rho_radial_integrals_(iat__)(iqdq.first, iqdq.second); } inline double vloc_radial_integral(int iat__, double q__) const { auto iqdq = iqdq_gmax_vloc(q__); return vloc_radial_integrals_(iat__)(iqdq.first, iqdq.second); } }; } #endif // __RADIAL_INTEGRALS_H__
program_schedule_guided.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Demonstrates OpenMP guided scheduling: each loop index prints the id
 * of the thread it was assigned to.
 *
 * Usage: program <thread_count> <n>
 */
int main(int argc, char* argv[]) {
    /* Validate argc before touching argv[1]/argv[2]; the original
     * dereferenced them unconditionally and crashed without arguments. */
    if (argc != 3) {
        fprintf(stderr, "Usage: %s <thread_count> <n>\n", argv[0]);
        return EXIT_FAILURE;
    }

    int thread_count = (int) strtol(argv[1], NULL, 10);
    int n = (int) strtol(argv[2], NULL, 10);
    if (thread_count <= 0 || n < 0) {
        fprintf(stderr, "ERROR: thread_count must be > 0 and n must be >= 0\n");
        return EXIT_FAILURE;
    }

    #pragma omp parallel for num_threads(thread_count) schedule(guided)
    for (int i = 0; i < n; i++) {
        printf("i=%d, thread_id=%d\n", i, omp_get_thread_num());
    }
    return 0;
}
GB_unaryop__minv_fp32_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_fp32_uint32
// op(A') function:  GB_tran__minv_fp32_uint32

// C type:   float
// A type:   uint32_t
// cast:     float cij = (float) aij
// unaryop:  cij = (1.0F)/aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// Cx [p] entry accessor
#define GB_CX(p) Cx [p]

// unary operator (multiplicative inverse; the float division means an
// aij of zero produces Inf under IEEE-754 semantics, not a trap)
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting (uint32_t input is converted to float before the division)
#define GB_CASTING(z, x) \
    float z = (float) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;             \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;               \
    GB_OP (GB_CX (pC), x) ;             \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the minv operator to all anz entries of Ax, writing Cx.
// Each iteration is independent, so the loop is statically scheduled
// across nthreads OpenMP threads.
GrB_Info GB_unop__minv_fp32_uint32
(
    float *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c; this wrapper
// only selects the phase and supplies the type/operator macros defined above.
GrB_Info GB_tran__minv_fp32_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{ 
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges. We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
%     Image *AdaptiveBlurImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return an unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots are populated: each successive kernel is 2 pixels
    narrower, and the per-pixel index j below is forced even to match.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    /*
      Each kernel[i] is a (width-i) x (width-i) Gaussian, normalized so its
      weights sum to 1 by folding the residual into the center tap.
    */
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        Allocation failed part way: release the kernels acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel index from the edge-map intensity: strong edges
        pick a large j (small kernel, little blur), flat areas a small j
        (wide kernel).  j is clamped to [0,width] and forced even to index
        the populated kernel slots.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.
We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma). For reasonable results, radius should be larger than sigma. Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
%     Image *AdaptiveSharpenImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return an unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only the
    even-indexed slots are populated: each successive kernel is 2 pixels
    narrower, and the per-pixel index j below is forced even to match.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    /*
      Each kernel[i] is a (width-i) x (width-i) negated Gaussian (a
      sharpening kernel) whose center tap is -2 times the negative sum, so
      the kernel sums to the magnitude of the surround.
    */
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        Allocation failed part way: release the kernels acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel index from the edge-map intensity: strong edges
        pick a large j (small kernel, strong sharpening), flat areas a small
        j.  j is clamped to [0,width] and forced even to index the populated
        kernel slots.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).
For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. 
%
% The format of the ConvolveImage method is:
%
%     Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o kernel: the filtering kernel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  {
    Image
      *convolve_image;

    /*
      Prefer the OpenCL-accelerated convolution when it is available.
    */
    convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
    if (convolve_image != (Image *) NULL)
      return(convolve_image);
  }
#endif
  /*
    Convolution is a single morphology pass with the given kernel.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one Crimmins hull pass over the f/g scratch buffers for
  DespeckleImage().  The buffers are (columns+2) x (rows+2) with a one-pixel
  border; x_offset/y_offset select the neighbor direction and polarity
  selects raising (>0) or lowering (<=0) of pixel values.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    First pass: p walks f, q walks g, r is the neighbor of p at the given
    offset.  Nudge each pixel by 1 toward a neighbor that differs by >= 2.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /* skip the 1-pixel border: row y starts at offset (2*y+1)+y*columns */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: read back from g, comparing against the neighbor (r) and
    its mirror (s); the result is written back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport
Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /* the 4 hull directions (E, S, SE, SW) applied forward and backward */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* prefer the OpenCL-accelerated path when it is available */
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Each channel plane is staged in a
    (columns+2) x (rows+2) buffer with a one-pixel border for Hull().
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the bordered staging buffer; j skips the
      border cells.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Crimmins speckle removal: for each direction, raise then lower in
      both the forward and reverse orientation.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Copy the despeckled plane back into the result image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Build the kernel by hand: clear the struct, then set the geometry and
    signature fields before allocating the value array.
  */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style kernel: every tap is -1 except the center, which is
    width*height-1 so the kernel sums to zero (flat areas map to black).
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
% larger than sigma. Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
%     Image *EmbossImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build an emboss kernel: Gaussian weights signed by quadrant (negative
    above/left, positive below/right), kept only along the anti-diagonal
    tracked by k; all other taps are zeroed.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize the kernel so its taps sum to 1.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    option[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel;

  /*
    Validate the arguments before doing any work.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2-D Gaussian kernel from the geometry string and convolve.
  */
  (void) FormatLocaleString(option,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel=AcquireKernelInfo(option,exception);
  if (kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel,exception);
  kernel=DestroyKernelInfo(kernel);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma() returns the Rec. 709 luma of a per-channel mean vector (one
  double per channel, indexed through the image's channel map).
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Quadrant windows are width x width, with width = radius+1. */
  width=(size_t) radius+1;
  /* Pre-smooth the source; quadrant statistics are taken from this copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width quadrants that touch (x,y); keep the
        one whose luma variance is smallest.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /*
          Per-channel mean of the quadrant.
        */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /*
          Luma variance of the quadrant about its mean.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* Early break above means a pixel read failed: abort the filter. */
          status=MagickFalse;
          break;
        }
      /* Write the winning quadrant's center (interpolated) to the output. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions. Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage: 100% maps to 20% of the largest dimension
    (0.002 == 0.20/100).  Scan lines are padded by "width" on both ends.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  /* One padded scan line per OpenMP thread, in one shared allocation. */
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  /* Normalizer for the triangular blur kernel used by both passes. */
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Each thread works in its own slice of the shared scan-line buffer. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Cache the (padded) column's luma values. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular-weighted sum over the window centered at y. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Row of vertically-blurred luma (already padded) from pass one. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        /* NOTE(review): srcVal can be 0 for black pixels, making mult a
           division by zero below — confirm upstream guarantees/intent. */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
            q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
            mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*
            mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); 
blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles 9
#define PreviewImageTag "Preview/Image"
#define DefaultPreviewGeometry "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  extern const char
    DefaultTileFrame[];

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Per-tile parameters; each iteration below steps them further. */
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /* Center tile: keep the unprocessed thumbnail as the reference. */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Apply despeckle i times: iterate i-1 times in place, then once. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* NOTE(review): BilevelImage is applied to the source thumbnail, not
           the preview_image clone, so the thresholded pixels never reach the
           montage tile — confirm whether this is intended. */
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        /* NOTE(review): label says "charcoal" for the oil-paint tile — same
           text as CharcoalDrawingPreview; confirm intent. */
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        /* Round-trip through a temporary JPEG at this tile's quality. */
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        /* Label with the compressed size in mb/kb/bytes. */
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0)
;
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Step the parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%      Image *RotationalBlurImage(const Image *image,const double angle,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Precompute a table of sample rotations spanning the blur angle, centered
    on zero (offset shifts the range to -angle/2 .. +angle/2).
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /*
        Pixels nearer the center need fewer samples: stride through the
        rotation table proportionally to blur_radius/radius.
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) ==
             UndefinedPixelTrait) || (channel == AlphaPixelChannel))
          {
            /*
              No alpha (or this IS alpha): plain average of the samples
              rotated about the blur center.
            */
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-weighted average; gamma renormalizes by the summed alpha.
        */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* Reuses BlurImageTag for progress reporting. */
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width 2D Gaussian kernel.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel values for diagnostics.
      */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    A grayscale clone supplies the per-pixel luminance used for the
    contrast test.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    center is the channel offset of the window's middle pixel within the
    (columns+width) x width region fetched per row below.
  */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: weight only by the kernel, and only where the
              neighbor's luminance contrast is under the threshold.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor qualified; keep the original pixel. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha-blended path: each contribution is weighted by kernel*alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation: Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (previous, current, next row, each padded by one
      column) for the surface-normal estimate below.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        point at the pixel above, at, and below (x,y) in the fetched window;
        normal.x and normal.y are intensity differences across the 3x3
        neighborhood.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* Gray shading: replace the channel with the shade value. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n   I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%      Image *SharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated 2D Gaussian, then give the center element
    a positive spike (-2 x the negative sum) and normalize so the kernel
    sums to one.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: intepolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    Only parallelize when the RNG is not keyed (key == ~0UL) so results
    remain reproducible.
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Sample the source at a random offset within +/- width/2 of (x,y).
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p   M a s k   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if ((unsharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
betweennessCentrality.c
#include "defs.h"

/*
 * betweennessCentrality:
 *   Accumulates (approximate) betweenness-centrality scores into BC[] for
 *   graph G using Brandes' algorithm.  From each selected source a
 *   level-synchronous BFS counts shortest paths (sig[]) and records
 *   predecessors (P[]); a backward sweep then accumulates dependencies
 *   (del[]) into BC.  Only numV = 2^K4approx non-isolated sources are
 *   processed (K4approx, graph, plist, VERT_T, LONG_T, DOUBLE_T and the
 *   SPRNG RNG come from defs.h / the SSCA2 support code).
 *   Returns the elapsed wall-clock time of the timed kernel section.
 */
double betweennessCentrality(graph* G, DOUBLE_T* BC) {
    VERT_T *S;          /* stack of vertices in the order of non-decreasing
                           distance from s. Also used to implicitly represent
                           the BFS queue */
    plist* P;           /* predecessors of a vertex v on shortest paths from s */
    DOUBLE_T* sig;      /* No. of shortest paths */
    LONG_T* d;          /* Length of the shortest path between every pair */
    DOUBLE_T* del;      /* dependency of vertices */
    LONG_T *in_degree, *numEdges, *pSums;
    LONG_T *pListMem;   /* single arena backing every P[i].list */
    LONG_T* Srcs;       /* permuted vertex IDs used as BFS sources */
    LONG_T *start, *end; /* per-phase [start,end) windows into S */
    LONG_T MAX_NUM_PHASES;
    LONG_T *psCount;    /* per-thread prefix sums used to merge local stacks */
#ifdef _OPENMP
    omp_lock_t* vLock;  /* one lock per vertex */
    LONG_T chunkSize;
#endif
    int seed = 2387;
    double elapsed_time;

#ifdef _OPENMP
#pragma omp parallel
    {
#endif
        /* Thread-local state; shared arrays above are allocated by tid 0. */
        VERT_T *myS, *myS_t;
        LONG_T myS_size;
        LONG_T i, j, k, p, count, myCount;
        LONG_T v, w, vert;
        LONG_T numV, num_traversals, n, m, phase_num;
        LONG_T tid, nthreads;
        int* stream;
#ifdef DIAGNOSTIC
        double elapsed_time_part;
#endif

#ifdef _OPENMP
        int myLock;
        tid = omp_get_thread_num();
        nthreads = omp_get_num_threads();
#else
        tid = 0;
        nthreads = 1;
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds();
        }
#endif

        /* numV: no. of vertices to run BFS from = 2^K4approx */
        numV = 1<<K4approx;
        n = G->n;
        m = G->m;

        /* Permute vertices */
        if (tid == 0) {
            Srcs = (LONG_T *) malloc(n*sizeof(LONG_T));
#ifdef _OPENMP
            vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t));
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
        for (i=0; i<n; i++) {
            omp_init_lock(&vLock[i]);
        }
#endif

        /* Initialize RNG stream */
        stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT);

#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            Srcs[i] = i;
        }

        /* Random pairwise swaps; test-locks mean a contended swap is simply
           skipped rather than waited for (best-effort shuffle). */
#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            j = n*sprng(stream);
            if (i != j) {
#ifdef _OPENMP
                int l1 = omp_test_lock(&vLock[i]);
                if (l1) {
                    int l2 = omp_test_lock(&vLock[j]);
                    if (l2) {
#endif
                        k = Srcs[i];
                        Srcs[i] = Srcs[j];
                        Srcs[j] = k;
#ifdef _OPENMP
                        omp_unset_lock(&vLock[j]);
                    }
                    omp_unset_lock(&vLock[i]);
                }
#endif
            }
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "Vertex ID permutation time: %lf seconds\n", elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Start timing code from here */
        if (tid == 0) {
            elapsed_time = get_seconds();
#ifdef VERIFYK4
            MAX_NUM_PHASES = 2*sqrt(n);
#else
            /* NOTE(review): hard cap of 50 BFS levels; graphs with larger
               diameter would overflow start[]/end[] — confirm acceptable
               for the intended inputs. */
            MAX_NUM_PHASES = 50;
#endif
        }

#ifdef _OPENMP
#pragma omp barrier
#endif

        /* Initialize predecessor lists */
        /* The size of the predecessor list of each vertex is bounded by
           its in-degree. So we first compute the in-degree of every vertex */
        if (tid == 0) {
            P = (plist *) calloc(n, sizeof(plist));
            in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T));
            numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T));
            pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<m; i++) {
            v = G->endV[i];
#ifdef _OPENMP
            omp_set_lock(&vLock[v]);
#endif
            in_degree[v]++;
#ifdef _OPENMP
            omp_unset_lock(&vLock[v]);
#endif
        }

        /* Exclusive prefix sum of in-degrees -> offsets into pListMem */
        prefix_sums(in_degree, numEdges, pSums, n);

        if (tid == 0) {
            pListMem = (LONG_T *) malloc(m*sizeof(LONG_T));
        }

#ifdef _OPENMP
#pragma omp barrier
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            P[i].list = pListMem + numEdges[i];
            P[i].degree = in_degree[i];
            P[i].count = 0;
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() - elapsed_time_part;
            fprintf(stderr, "In-degree computation time: %lf seconds\n", elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Allocate shared memory */
        if (tid == 0) {
            free(in_degree);
            free(numEdges);
            free(pSums);

            S = (VERT_T *) malloc(n*sizeof(VERT_T));
            sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T));
            d = (LONG_T *) malloc(n*sizeof(LONG_T));
            del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T));
            start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
            end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T));
            psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T));
        }

        /* local memory for each thread */
        myS_size = (2*n)/nthreads;
        myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T));
        num_traversals = 0;
        myCount = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

#ifdef _OPENMP
#pragma omp for
#endif
        for (i=0; i<n; i++) {
            d[i] = -1;    /* -1 == unvisited */
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "BC initialization time: %lf seconds\n", elapsed_time_part);
            elapsed_time_part = get_seconds();
        }
#endif

        /* Main loop: one Brandes traversal per non-isolated source vertex,
           stopping after numV traversals. */
        for (p=0; p<n; p++) {
            i = Srcs[p];
            if (G->numEdges[i+1] - G->numEdges[i] == 0) {
                continue;    /* skip isolated vertices */
            } else {
                num_traversals++;
            }

            if (num_traversals == numV + 1) {
                break;
            }

            if (tid == 0) {
                sig[i] = 1;
                d[i] = 0;
                S[0] = i;
                start[0] = 0;
                end[0] = 1;
            }

            count = 1;
            phase_num = 0;

#ifdef _OPENMP
#pragma omp barrier
#endif

            /* Forward sweep: level-synchronous BFS.  Each phase expands the
               frontier S[start[phase]..end[phase]) in parallel. */
            while (end[phase_num] - start[phase_num] > 0) {

                myCount = 0;
#ifdef _OPENMP
#pragma omp barrier
#pragma omp for schedule(dynamic)
#endif
                for (vert = start[phase_num]; vert < end[phase_num]; vert++) {
                    v = S[vert];
                    for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) {
#ifndef VERIFYK4
                        /* Filter edges with weights divisible by 8 */
                        if ((G->weight[j] & 7) != 0) {
#endif
                            w = G->endV[j];
                            if (v != w) {
#ifdef _OPENMP
                                myLock = omp_test_lock(&vLock[w]);
                                if (myLock) {
#endif
                                    /* w found for the first time? */
                                    if (d[w] == -1) {
                                        if (myS_size == myCount) {
                                            /* Resize myS */
                                            myS_t = (LONG_T *) malloc(2*myS_size*sizeof(VERT_T));
                                            memcpy(myS_t, myS, myS_size*sizeof(VERT_T));
                                            free(myS);
                                            myS = myS_t;
                                            myS_size = 2*myS_size;
                                        }
                                        myS[myCount++] = w;
                                        d[w] = d[v] + 1;
                                        sig[w] = sig[v];
                                        P[w].list[P[w].count++] = v;
                                    } else if (d[w] == d[v] + 1) {
                                        sig[w] += sig[v];
                                        P[w].list[P[w].count++] = v;
                                    }
#ifdef _OPENMP
                                    omp_unset_lock(&vLock[w]);
                                } else {
                                    /* NOTE(review): test-lock failed, so some
                                       other thread currently owns vLock[w];
                                       this path blocks and only adds the path
                                       count, presumably relying on the lock
                                       holder to have set d[w] — confirm
                                       against the original SSCA2 kernel. */
                                    if ((d[w] == -1) || (d[w] == d[v]+ 1)) {
                                        omp_set_lock(&vLock[w]);
                                        sig[w] += sig[v];
                                        P[w].list[P[w].count++] = v;
                                        omp_unset_lock(&vLock[w]);
                                    }
                                }
#endif
                            }
#ifndef VERIFYK4
                        }
#endif
                    }
                }

                /* Merge all local stacks for next iteration */
                phase_num++;

                psCount[tid+1] = myCount;

#ifdef _OPENMP
#pragma omp barrier
#endif

                /* Serial prefix sum over per-thread frontier sizes gives each
                   thread its copy-out offset into the shared stack S. */
                if (tid == 0) {
                    start[phase_num] = end[phase_num-1];
                    psCount[0] = start[phase_num];
                    for(k=1; k<=nthreads; k++) {
                        psCount[k] = psCount[k-1] + psCount[k];
                    }
                    end[phase_num] = psCount[nthreads];
                }

#ifdef _OPENMP
#pragma omp barrier
#endif

                for (k = psCount[tid]; k < psCount[tid+1]; k++) {
                    S[k] = myS[k-psCount[tid]];
                }

#ifdef _OPENMP
#pragma omp barrier
#endif
                count = end[phase_num];
            }

            phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif

            /* Backward sweep: accumulate dependencies level by level,
               deepest level first (source level 0 is excluded). */
            while (phase_num > 0) {
#ifdef _OPENMP
#pragma omp for
#endif
                for (j=start[phase_num]; j<end[phase_num]; j++) {
                    w = S[j];
                    for (k = 0; k<P[w].count; k++) {
                        v = P[w].list[k];
#ifdef _OPENMP
                        omp_set_lock(&vLock[v]);
#endif
                        del[v] = del[v] + sig[v]*(1+del[w])/sig[w];
#ifdef _OPENMP
                        omp_unset_lock(&vLock[v]);
#endif
                    }
                    BC[w] += del[w];
                }

                phase_num--;

#ifdef _OPENMP
#pragma omp barrier
#endif
            }

            /* Reset per-traversal state for exactly the vertices touched. */
#ifdef _OPENMP
            /* NOTE(review): chunkSize is 0 when nthreads > n; a static
               schedule with chunk 0 is invalid per the OpenMP spec —
               confirm n >= nthreads is guaranteed by the harness. */
            chunkSize = n/nthreads;
#pragma omp for schedule(static, chunkSize)
#endif
            for (j=0; j<count; j++) {
                w = S[j];
                d[w] = -1;
                del[w] = 0;
                P[w].count = 0;
            }

#ifdef _OPENMP
#pragma omp barrier
#endif
        }

#ifdef DIAGNOSTIC
        if (tid == 0) {
            elapsed_time_part = get_seconds() -elapsed_time_part;
            fprintf(stderr, "BC computation time: %lf seconds\n", elapsed_time_part);
        }
#endif

#ifdef _OPENMP
#pragma omp for
        for (i=0; i<n; i++) {
            omp_destroy_lock(&vLock[i]);
        }
#endif

        free(myS);

        if (tid == 0) {
            free(S);
            free(pListMem);
            free(P);
            free(sig);
            free(d);
            free(del);
#ifdef _OPENMP
            free(vLock);
#endif
            free(start);
            free(end);
            free(psCount);
            elapsed_time = get_seconds() - elapsed_time;
            free(Srcs);
        }
        free_sprng(stream);
#ifdef _OPENMP
    }
#endif

    /* Verification */
#ifdef VERIFYK4
    { /* PHJK: added {} blocks so declaraions pass simplescalar gcc */
        double BCval;
        /* Closed-form expected BC value for the VERIFYK4 torus inputs. */
        if (SCALE % 2 == 0) {
            BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0;
        } else {
            BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0;
        }
        {
            int failed = 0;
            int i;
            extern int myround(double);
            for (i=0; i<G->n; i++) {
                if (myround(BC[i] - BCval) != 0) {
                    failed = 1;
                    break;
                }
            }
            if (failed) {
                fprintf(stderr, "Kernel 4 failed validation!\n");
            } else {
                fprintf(stderr, "Kernel 4 validation successful!\n");
            }
        }
    }
#endif

    //graph* G, DOUBLE_T* BC)
    /* NOTE(review): %d with G->n / G->m assumes LONG_T is int-sized —
       confirm against the LONG_T typedef in defs.h. */
    fprintf(stderr, "XXXXXXXX: v:%d e:%d\n",G->n, G->m);

    return elapsed_time;
}

/* not recommended solution.... */
/* Round-half-away-from-zero to the nearest int; used only for the
   VERIFYK4 tolerance check above. */
int myround(double x) {
    if (x < 0.0)
        return (int)(x - 0.5);
    else
        return (int)(x + 0.5);
}
ccm.h
#pragma once
#include <ctime>
#include <deque>
#include <queue>
#include "hash.h"
#include "update.h"
#include "median.h"
#include "compact_vector/compact_vector.hpp"
#include "vec/vec.h"

// Count-Min / Count-Sketch family of streaming frequency estimators.
// ccmbase_t  : count-min over a packed compact vector (optional conservative
//              update); csbase_t / cs4wbase_t : signed count-sketch variants;
// cmmbase_t  : count-min-mean stub; SlidingWindow : windowed wrapper.

namespace sketch {
inline namespace common {
namespace detail {
// Zero the raw backing storage of a compact vector.
// `newsz` is accepted for interface uniformity but unused here.
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline void zero_memory(compact::vector<T1, BITS, T2, Allocator> &v, size_t newsz=0) {
    std::memset(v.get(), 0, v.bytes()); // zero array
}
// Thread-safe compact-vector overload; identical zeroing.
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline void zero_memory(compact::ts_vector<T1, BITS, T2, Allocator> &v, size_t newsz=0) {
    std::memset(v.get(), 0, v.bytes()); // zero array
}
} // namespace detail
} // inline namespace common

inline namespace cm {
using common::detail::tmpbuffer;
using common::Allocator;
using std::int64_t;

// Pick the packed-counter backing store: plain compact::vector when the
// build opts out of thread safety, ts_vector otherwise.
#if NOT_THREADSAFE
template<size_t NBITS>
class DefaultStaticCompactVectorType: public ::compact::vector<uint64_t, NBITS, uint64_t, Allocator<uint64_t>> {
public:
    // `nb` (bits per entry) is fixed by NBITS at compile time and ignored here.
    DefaultStaticCompactVectorType(size_t nb, size_t nelem): ::compact::vector<uint64_t, NBITS, uint64_t, Allocator<uint64_t>>(nelem) {}
};
using DefaultCompactVectorType = ::compact::vector<uint64_t, 0, uint64_t, Allocator<uint64_t>>;
#else
using DefaultCompactVectorType = ::compact::ts_vector<uint64_t, 0, uint64_t, Allocator<uint64_t>>;
template<size_t NBITS>
class DefaultStaticCompactVectorType: public ::compact::ts_vector<uint64_t, NBITS, uint64_t, Allocator<uint64_t>> {
public:
    DefaultStaticCompactVectorType(size_t nb, size_t nelem): ::compact::ts_vector<uint64_t, NBITS, uint64_t, Allocator<uint64_t>>(nelem) {}
};
#endif

namespace detail {
// sqrl2: median over hash rows of the row-wise L2 norm of the counter table
// (nhashes rows of 2^l2sz counters each).
template<typename T, typename AllocatorType>
static inline double sqrl2(const std::vector<T, AllocatorType> &v, uint32_t nhashes, uint32_t l2sz) {
    tmpbuffer<double, 8> mem(nhashes);
    double *ptr = mem.get();
#if defined(_BLAZE_MATH_CUSTOMMATRIX_H_)
    blaze::CustomMatrix<T, blaze::aligned, blaze::unpadded> data(v.data(), nhashes, size_t(1) << l2sz);
    for(size_t i = 0; i < data.rows(); ++i) {
        ptr[i] = blaze::norm(row(data, i));
    }
#else
    using VT = typename vec::SIMDTypes<T>::VType;
    using VS = vec::SIMDTypes<T>;
    // NOTE(review): `sum` is never reset between rows, so each row's total
    // includes all previous rows' squares, and `full_sum` holds the squared
    // norm (not the norm) unlike the blaze path — looks like a bug; confirm.
    VT sum = VS::set1(0);
    static constexpr size_t ct = VS::COUNT;
    for(size_t i = 0; i < nhashes; ++i) {
        const T *p1 = &v[i << l2sz], *p2 = &v[(i+1)<<l2sz];
        if(VS::is_aligned) {
            while(p2 - p1 > ct) {
                const auto el = *reinterpret_cast<const VT *>(p1);
                sum = VS::add(sum, VS::mul(el, el));
                p1 += ct;
            }
        } else {
            while(p2 - p1 > ct) {
                const auto el = VT::loadu(p1);
                sum = VS::add(sum, VS::mul(el, el));
                p1 += ct;
            }
        }
        T full_sum = sum.sum();
        // scalar tail
        while(p1 < p2) full_sum += *p1 * *p1, ++p1;
        ptr[i] = full_sum;
    }
#endif
    return std::sqrt(median(ptr, nhashes));
}

// Two-table variant: per-row norm of the elementwise product, used for
// join-size estimation; returns the median across rows.
template<typename T, typename AllocatorType>
static inline double sqrl2(const std::vector<T, AllocatorType> &v, const std::vector<T, AllocatorType> &v2, uint32_t nhashes, uint32_t l2sz) {
    assert(v.size() == v2.size());
    tmpbuffer<double, 8> mem(nhashes);
    double *ptr = mem.get();
#if defined(_BLAZE_MATH_CUSTOMMATRIX_H_)
    using CM = blaze::CustomMatrix<T, blaze::aligned, blaze::unpadded>;
    const CM lv(v.data(), nhashes, size_t(1) << l2sz);
    const CM rv(v2.data(), nhashes, size_t(1) << l2sz);
    for(auto i = 0u; i < lv.rows(); ++i) {
        ptr[i] = blaze::norm(row(lv, i) * row(rv, i)); // Elementwise multiplication
    }
#else
    using VT = typename vec::SIMDTypes<T>::VType;
    using VS = vec::SIMDTypes<T>;
    VT sum = VS::set1(0);
    // NOTE(review): first element contributes |a*b| while the rest
    // contribute (a*b)^2, and `sum` is unused in this path — inconsistent
    // with the blaze branch; confirm intended formula.
    for(size_t i = 0; i < nhashes; ++i) {
        auto p1 = &v[i << l2sz], p2 = &v2[i << l2sz], p1e = &v[(i + 1) << l2sz];
        T full_sum = std::abs(*p1++ * *p2++);
        while(p1 != p1e) full_sum += std::pow(*p1++ * *p2++, 2);
        ptr[i] = std::sqrt(full_sum);
    }
#endif
    return median(ptr, nhashes);
}

// compact::vector overloads: element access goes through the packed-bit
// proxy, so these use plain scalar loops.
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline double sqrl2(const compact::vector<T1, BITS, T2, Allocator> &v, uint32_t nhashes, uint32_t l2sz) {
    tmpbuffer<double, 8> mem(nhashes);
    double *ptr = mem.get();
    for(size_t i = 0; i < nhashes; ++i) {
        size_t start = i << l2sz, end = (i + 1) << l2sz;
        double sum = 0;
        while(start != end) {
            int64_t val = v[start++];
            sum += val * val;
        }
        ptr[i] = std::sqrt(sum);
    }
    return median(ptr, nhashes);
}
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline double sqrl2(const compact::vector<T1, BITS, T2, Allocator> &v, const compact::vector<T1, BITS, T2, Allocator> &v2, uint32_t nhashes, uint32_t l2sz) {
    tmpbuffer<double, 8> mem(nhashes);
    double *ptr = mem.get();
    for(size_t i = 0; i < nhashes; ++i) {
        size_t start = i << l2sz, end = (i + 1) << l2sz;
        double sum = 0;
        while(start != end) {
            sum += v[start] * v2[start];
            ++start;
        }
        ptr[i] = std::sqrt(sum);
    }
    return median(ptr, nhashes);
}
// ts_vector overloads (do/while form; assumes non-empty rows, i.e. l2sz > 0).
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline double sqrl2(const compact::ts_vector<T1, BITS, T2, Allocator> &v, uint32_t nhashes, uint32_t l2sz) {
    tmpbuffer<double> mem(nhashes);
    double *ptr = mem.get();
    for(size_t i = 0; i < nhashes; ++i) {
        size_t start = i << l2sz, end = (i + 1) << l2sz;
        double sum = 0;
        do {
            int64_t val = v[start++];
            sum += val * val;
        } while(start != end);
        ptr[i] = std::sqrt(sum);
    }
    return median(ptr, nhashes);
}
template<typename T1, unsigned int BITS, typename T2, typename Allocator>
static inline double sqrl2(const compact::ts_vector<T1, BITS, T2, Allocator> &v, const compact::ts_vector<T1, BITS, T2, Allocator> &v2, uint32_t nhashes, uint32_t l2sz) {
    tmpbuffer<double, 8> mem(nhashes);
    double *ptr = mem.get();
    for(size_t i = 0; i < nhashes; ++i) {
        size_t start = i << l2sz, end = (i + 1) << l2sz;
        double sum = 0;
        do {
            sum += v[start] * v2[start];
            ++start;
        } while(start != end);
        ptr[i] = std::sqrt(sum);
    }
    return median(ptr, nhashes);
}

// Element type of a container, deduced from its const iterator.
template<typename T>
struct IndexedValue {
    using Type = typename std::decay_t<decltype(*(std::declval<T>().cbegin()))>;
};
} // namespace detail

// Count-min sketch over a packed counter table of nhashes_ sub-tables of
// 2^l2sz_ counters, each nbits_ wide.  UpdateStrategy controls increment
// semantics; conservative_update only raises the minimal counters.
template<typename UpdateStrategy=update::Increment,
         typename VectorType=DefaultCompactVectorType,
         typename HashStruct=WangHash,
         bool conservative_update=true>
class ccmbase_t {
    static_assert(!std::is_same<UpdateStrategy, update::CountSketch>::value || std::is_signed<typename detail::IndexedValue<VectorType>::Type>::value, "If CountSketch is used, value must be signed.");
protected:
    VectorType data_;          // packed counters, nhashes_ * 2^l2sz_ entries
    UpdateStrategy updater_;
    unsigned nhashes_;
    unsigned l2sz_:16;         // log2 of sub-table size
    unsigned nbits_:16;        // bits per counter
    HashStruct hf_;
    uint64_t mask_;            // 2^l2sz_ - 1, selects position within a sub-table
    uint64_t subtbl_sz_;       // 2^l2sz_
    std::vector<uint64_t, common::Allocator<uint64_t>> seeds_; // one seed per hash
public:
    using counter_register_type = typename std::decay<decltype(data_[0])>::type;
    static constexpr bool supports_deletion() { return !conservative_update; }
    size_t size() const {return data_.size();}
    std::pair<size_t, size_t> est_memory_usage() const {
        return std::make_pair(sizeof(*this), seeds_.size() * sizeof(seeds_[0]) + data_.bytes());
    }
    size_t seeds_size() const {return seeds_.size();}
    void clear() { common::detail::zero_memory(data_, ilog2(subtbl_sz_)); }
    // Median row L2 norm of the counter table.
    double l2est() const { return detail::sqrl2(data_, nhashes_, l2sz_); }
    double join_size_l2est(const ccmbase_t &o) const {
        PREC_REQ(o.size() == this->size(), "tables must have the same size\n");
        return detail::sqrl2(data_, o.data_, nhashes_, l2sz_);
    }
    template<typename Func>
    void for_each_register(const Func &func) { for(size_t i = 0; i < data_.size(); ++i) func(data_[i]); }
    template<typename Func>
    void for_each_register(const Func &func) const { for(size_t i = 0; i < data_.size(); ++i) func(data_[i]); }
    ccmbase_t(ccmbase_t &&o): data_(std::move(o.data_)), updater_(std::move(o.updater_)), nhashes_(o.nhashes_), l2sz_(o.l2sz_), nbits_(o.nbits_), hf_(std::move(o.hf_)), mask_(o.mask_), subtbl_sz_(o.subtbl_sz_), seeds_(std::move(o.seeds_)) { }
    ccmbase_t(const ccmbase_t &o) = default;
    //ccmbase_t(ccmbase_t &&o) = default;
    // nbits: counter width; l2sz: log2 sub-table size; nhashes: number of
    // hash rows; remaining args forward to the hash functor.
    template<typename... Args>
    ccmbase_t(int nbits, int l2sz, int64_t nhashes=4, uint64_t seed=0, Args &&... args):
        data_(nbits, nhashes << l2sz), updater_(seed + l2sz * nbits * nhashes), nhashes_(nhashes), l2sz_(l2sz), nbits_(nbits), hf_(std::forward<Args>(args)...), mask_((1ull << l2sz) - 1), subtbl_sz_(1ull << l2sz)
    {
        if(HEDLEY_UNLIKELY(nbits < 0)) throw std::runtime_error("Number of bits cannot be negative.");
        if(HEDLEY_UNLIKELY(l2sz < 0)) throw std::runtime_error("l2sz cannot be negative.");
        if(HEDLEY_UNLIKELY(nhashes < 0)) throw std::runtime_error("nhashes cannot be negative.");
        std::mt19937_64 mt(seed + 4);
        while(seeds_.size() < static_cast<unsigned>(nhashes)) seeds_.emplace_back(mt());
        clear();
        VERBOSE_ONLY(std::fprintf(stderr, "data size: %zu. nbits per entry: %u\n", data_.size(), nbits);)
    }
    VectorType &ref() {return data_;}
    template<typename T, typename=std::enable_if_t<!std::is_arithmetic<T>::value>>
    auto addh(const T &x) { uint64_t hv = hf_(x); return add(hv); }
    auto addh(uint64_t val) {return add(val);}
    auto addh_val(uint64_t val) {return add(val);}
    template<typename T> T hash(T val) const { return hf_(val); }
    uint64_t subhash(uint64_t val, uint64_t seedind) const {
        return hash(((val ^ seeds_[seedind]) & mask_) + (val & mask_));
    }
    // Weighted-Jaccard-style estimate between two sketches.
    double wj_est(const ccmbase_t &o) const {
        // NOTE(review): the format string has %s/%s/%d conversions but no
        // arguments (UB); presumably __FILE__/__func__/__LINE__ were
        // intended.  Also `data_[i]` below likely should be `data_[j]` to
        // mirror `o.data_[j]` — confirm before relying on this function.
        std::fprintf(stderr, "[%s:%s:%d] Warning: This function should not be used.\n");
#if WJMETH0
        tmpbuffer<double> counts(nhashes_);
        auto p = counts.get();
#elif MINMETH
        double minest = std::numeric_limits<double>::max();
#else
        double minest = 0.;
#endif
        for(size_t i = 0; i < nhashes_; ++i) {
            uint64_t n = 0, d = 0;
            uint64_t d1, d2;
            for(size_t j = (i << l2sz_), e = (i + 1) << l2sz_; j < e; ++j) {
                d1 = data_[j] > 0 ? data_[i]: -data_[i], d2 = o.data_[j] >0 ? o.data_[j]: -o.data_[j];
                n += std::min(d1, d2);
                d += std::max(d1, d2);
            }
#if WJMETH0
            *p++ = double(n) / d;
#elif MINMETH
            minest = std::min(double(n) / d, minest);
#else
            minest += double(n) / d;
#endif
        }
#if WJMETH0
        return median(counts.get(), nhashes_);
#elif MINMETH
        return minest;
#else
        return minest / nhashes_;
#endif
    }
    uint64_t mask() const {return mask_;}
    auto np() const {return l2sz_;}
    auto &at_pos(uint64_t hv, uint64_t seedind) {
        return data_[(hv & mask_) + (seedind << np())];
    }
    const auto &at_pos(uint64_t hv, uint64_t seedind) const {
        return data_[(hv & mask_) + (seedind << np())];
    }
    // Deliberately disabled pending refactor; always throws.
    bool may_contain(uint64_t val) const {
        throw std::runtime_error("This needs to be rewritten after subhash refactoring.");
        return true;
    }
    using Space = vec::SIMDTypes<uint64_t>;
    uint32_t may_contain(Space::VType val) const {
        throw std::runtime_error("This needs to be rewritten after subhash refactoring.");
        return true;
    }
    static constexpr bool is_increment = std::is_same<UpdateStrategy, update::Increment>::value;
    // Insert `val`; returns its pre-update minimum count (+1 for plain
    // increment).  Conservative update raises only the minimal counters.
    ssize_t add(const uint64_t val) {
        unsigned nhdone = 0;
        ssize_t ret;
        CONST_IF(conservative_update) {
            std::vector<uint64_t> indices, best_indices;
            indices.reserve(nhashes_);
            //std::fprintf(stderr, "Doing SIMD stuff\n");
            //std::fprintf(stderr, "Doing Leftover stuff\n");
            while(nhdone < nhashes_) {
                assert(seeds_.data());
                uint64_t hv = hash(val, nhdone);
                auto index = subtbl_sz_ * nhdone++ + (hv & mask_);
                indices.push_back(index);
            }
#if 0
            if(val == 137) {
                for(const auto v: indices) std::fprintf(stderr, "index for 137: %u\n", unsigned(v));
            }
#endif
            // Collect all positions holding the current minimum.
            best_indices.push_back(indices[0]);
            ssize_t minval = data_.operator[](indices[0]);
            for(size_t i(1); i < indices.size(); ++i) {
                unsigned score;
                if((score = data_.operator[](indices[i])) == minval) {
                    best_indices.push_back(indices[i]);
                } else if(score < minval) {
                    best_indices.clear();
                    best_indices.push_back(indices[i]);
                    minval = score;
                }
            }
            //std::fprintf(stderr, "Now update\n");
            updater_(best_indices, data_, nbits_);
            ret = minval;
            //std::fprintf(stderr, "Now updated\n");
        } else {
            // not conservative update. This means we support deletions
            ret = std::numeric_limits<decltype(ret)>::max();
            const auto maxv = 1ull << nbits_;
            std::vector<uint64_t> indices{0};
            while(nhdone < nhashes_) {
                uint64_t hv = hash(val, nhdone);
                auto ind = (hv & mask_) + subtbl_sz_ * nhdone++;
                indices[0] = ind;
                updater_(indices, data_, maxv);
                ret = std::min(ret, ssize_t(data_[ind]));
            }
        }
        return ret + is_increment;
    }
    // Per-row hash: seed-XOR then the base hash.
    auto hash(uint64_t x, unsigned index) const {
        return hash(x ^ seeds_[index]);
    }
    // Point query: minimum counter across rows, post-processed by the updater.
    uint64_t est_count(uint64_t val) const {
        uint64_t ret = std::numeric_limits<uint64_t>::max();
        for(unsigned i = 0; i < nhashes_; ++i) {
            auto hv = hash(val, i);
            ret = std::min(ret, uint64_t(data_[(hv & mask_) + subtbl_sz_ * i]));
        }
        return updater_.est_count(ret);
    }
    ccmbase_t operator+(const ccmbase_t &other) const {
        ccmbase_t cpy = *this;
        cpy += other;
        return cpy;
    }
    ccmbase_t operator&(const ccmbase_t &other) const {
        ccmbase_t cpy = *this;
        cpy &= other;
        return cpy;
    }
    // Elementwise min (intersection-style combine); requires identical seeds.
    ccmbase_t &operator&=(const ccmbase_t &other) {
        if(seeds_.size() != other.seeds_.size() || !std::equal(seeds_.cbegin(), seeds_.cend(), other.seeds_.cbegin()))
            throw std::runtime_error("Could not add sketches together with different hash functions.");
        for(size_t i(0), e(data_.size()); i < e; ++i) {
            data_[i] = std::min(static_cast<unsigned>(data_[i]), static_cast<unsigned>(other.data_[i]));
        }
        return *this;
    }
    // Elementwise combine via the update strategy; requires identical seeds.
    ccmbase_t &operator+=(const ccmbase_t &other) {
        if(seeds_.size() != other.seeds_.size() || !std::equal(seeds_.cbegin(), seeds_.cend(), other.seeds_.cbegin()))
            throw std::runtime_error("Could not add sketches together with different hash functions.");
        for(size_t i(0), e(data_.size()); i < e; ++i) {
            data_[i] = updater_.combine(data_[i], other.data_[i]);
        }
        return *this;
    }
};

// Count sketch with signed counters; one hash function reused with per-row
// seed XOR.  Point estimates are medians across rows.
template<typename HashStruct=WangHash, typename CounterType=int32_t, typename=typename std::enable_if<std::is_signed<CounterType>::value>::type>
class csbase_t {
    /*
     * Commentary: because of chance, one can end up with a negative number as an estimate.
     * Either the item collided with another item which was quite large and it was outweighed
     * or it and others in the bucket were not heavy enough and by chance it did
     * not weigh over the other items with the opposite sign. Treat these as 0s.
     */
    std::vector<CounterType, Allocator<CounterType>> core_;  // nh_ rows of 2^np_ counters
    uint32_t np_, nh_;   // log2 row size; number of rows
    const HashStruct hf_;
    uint64_t mask_;      // 2^np_ - 1
    std::vector<CounterType, Allocator<CounterType>> seeds_; // per-row XOR seeds
    uint64_t seedseed_;
    CounterType *data() {return core_.data();}
    const CounterType *data() const {return core_.data();}
public:
    template<typename...Args>
    csbase_t(unsigned np, unsigned nh=1, unsigned seedseed=137, Args &&...args):
        core_(uint64_t(nh) << np), np_(np), nh_(nh), hf_(std::forward<Args>(args)...), mask_((1ull << np_) - 1), seeds_(nh_), seedseed_(seedseed)
    {
        //DEPRECATION_WARNING("csbase_t will be deprecated in favor of cs4wbase_t moving forward.");
        DefaultRNGType gen(np + nh + seedseed);
        for(auto &el: seeds_) el = gen();
    }
    double l2est() const {
        return sqrl2(core_, nh_, np_);
    }
    // Insert and return the (median) updated estimate.
    CounterType addh_val(uint64_t val) {
        std::vector<CounterType> counts(nh_);
        auto cptr = counts.data();
        uint64_t v = hf_(val);
        cptr[0] = add(v, 0);
        for(unsigned ind = 1;ind < nh_; ++ind)
            cptr[ind] = add(hf_(seeds_[ind] ^ val), ind);
        return median(cptr, nh_);
    }
    template<typename T>
    CounterType addh_val(const T &x) {
        uint64_t hv = hf_(x);
        return addh_val(hv);
    }
    template<typename T, typename=std::enable_if_t<!std::is_arithmetic<T>::value>>
    void addh(const T &x) { uint64_t hv = hf_(x); addh(hv); }
    // Insert without computing an estimate.
    void addh(uint64_t val) {
        uint64_t v = hf_(val);
        auto it = seeds_.begin();
        add(v, 0);
        unsigned ind = 1;
        while(ind < nh_) add(hf_(*it++ ^ val), ind++);
    }
    template<typename Func>
    void for_each_register(const Func &func) { for(size_t i = 0; i < core_.size(); ++i) func(core_[i]); }
    template<typename Func>
    void for_each_register(const Func &func) const { for(size_t i = 0; i < core_.size(); ++i) func(core_[i]); }
    // Remove one occurrence (count sketch supports deletion).
    void subh(uint64_t val) {
        uint64_t v = hf_(val);
        auto it = seeds_.begin();
        sub(v, 0);
        unsigned ind = 1;
        while(ind < nh_) sub(hf_(*it++ ^ val), ind++);
    }
    auto subh_val(uint64_t val) {
        tmpbuffer<CounterType> counts(nh_);
        auto cptr = counts.get();
        uint64_t v = hf_(val);
        auto it = seeds_.begin();
        *cptr++ = sub(v, 0);
        unsigned ind = 1;
        while(ind < nh_) *cptr++ = sub(hf_(*it++ ^ val), ind++);
        return median(counts.get(), nh_);
    }
    // Flat index of hv's bucket in row `subidx`.
    INLINE size_t index(uint64_t hv, unsigned subidx) const noexcept {
        return (hv & mask_) + (subidx << np_);
    }
    INLINE auto add(uint64_t hv, unsigned subidx) noexcept {
#if !NDEBUG
        at_pos(hv, subidx) += sign(hv);
        return at_pos(hv, subidx);
#else
        return at_pos(hv, subidx) += sign(hv);
#endif
    }
    // Sign-corrected read of the bucket value.
    INLINE auto vatpos(uint64_t hv, unsigned subidx) const noexcept {
        return at_pos(hv, subidx) * sign(hv);
    }
    INLINE auto sub(uint64_t hv, unsigned subidx) noexcept {
        return at_pos(hv, subidx) -= sign(hv);
    }
    INLINE auto &at_pos(uint64_t hv, unsigned subidx) noexcept {
        assert(index(hv, subidx) < core_.size() || !std::fprintf(stderr, "hv & mask_: %zu. subidx %d. np: %d. nh: %d. size: %zu\n", size_t(hv&mask_), subidx, np_, nh_, core_.size()));
        return core_[index(hv, subidx)];
    }
    INLINE auto at_pos(uint64_t hv, unsigned subidx) const noexcept {
        assert((hv & mask_) + (subidx << np_) < core_.size());
        return core_[index(hv, subidx)];
    }
    // +/-1 drawn from the hash bit just above the index bits.
    INLINE int sign(uint64_t hv) const {
        return hv & (1ul << np_) ? 1: -1;
    }
    // Point query: median of sign-corrected bucket values across rows.
    CounterType est_count(uint64_t val) const {
        common::detail::tmpbuffer<CounterType> mem(nh_);
        CounterType *ptr = mem.get();
        uint64_t v = hf_(val);
        auto it = seeds_.begin();
        *ptr++ = vatpos(v, 0);
        for(unsigned ind = 1;ind < nh_; ++it, ++ptr, ++ind) {
            auto hv = hf_(*it ^ val);
            *ptr = vatpos(hv, ind);
        }
        //std::for_each(mem.get(), mem.get() + nh_, [p=mem.get()](const auto &x) {std::fprintf(stderr, "Count estimate for ind %zd is %u\n", &x - p, int32_t(x));});
        ///
        return median(mem.get(), nh_);
    }
    // Elementwise sum; SIMD path when both tables are aligned and rows are
    // at least one vector wide.
    csbase_t &operator+=(const csbase_t &o) {
        precondition_require(o.size() == this->size(), "tables must have the same size\n");
        using VS = vec::SIMDTypes<CounterType>;
        using VT = typename VS::VType;
        VT sum = VS::set1(0);  // NOTE(review): unused in both paths
        static constexpr uint32_t lim = ilog2(VS::COUNT);
        if(np_ > lim && VS::aligned(o.data()) && VS::aligned(data())) {
            size_t i = 0;
            do {
                VS::store(data() + i, VS::add(VS::load(o.data() + i), VS::load(data() + i)));
                i += VS::COUNT;
            } while(i < core_.size());
        } else {
            for(size_t i = 0; i < core_.size(); ++i) core_[i] += o.core_[i];
        }
        return *this;
    }
    csbase_t operator+(const csbase_t &o) const {
        auto tmp = *this;
        tmp += o;
        return tmp;
    }
    csbase_t &operator-=(const csbase_t &o) {
        // TODO: SIMD optimize (but how often is this needed?)
        PREC_REQ(core_.size() == o.core_.size(), "mismatched sizes");
        for(size_t i = 0; i < core_.size(); ++i)
            core_[i] -= o.core_[i];
        return *this;
    }
    csbase_t operator-(const csbase_t &o) const {
        auto tmp = *this;
        tmp -= o;
        return tmp;
    }
    // Shrink to a table with np_ - n index bits by summing aliasing buckets.
    csbase_t fold(int n=1) const {
        PREC_REQ(n >= 1, "n < 0 is meaningless and n = 1 uses a copy instead.");
        PREC_REQ(n <= np_, "Can't fold to less than 1");
        csbase_t ret(np_ - n, nh_, seedseed_);
        schism::Schismatic<uint32_t> div(core_.size());
        // More cache-efficient way to traverse than iterating over the final sketch
        for(size_t i = 0; i < core_.size(); ++i)
            ret.core_[div.mod(i)] += core_[i];
        return ret;
    }
};

// Count sketch using a k-wise-independent hasher set (one independent hash
// per row) instead of seed-XOR reuse.
template<typename CounterType=int32_t, typename HasherSetType=KWiseHasherSet<4>>
class cs4wbase_t {
    /*
     * Commentary: because of chance, one can end up with a negative number as an estimate.
     * Either the item collided with another item which was quite large and it was outweighed
     * or it and others in the bucket were not heavy enough and by chance it did
     * not weigh over the other items with the opposite sign. Treat these as 0s.
     */
    static_assert(std::is_signed<CounterType>::value, "CounterType must be signed");
    // Note: in order to hash other types, you'd need to subclass the HasherSet
    // class in hash.h and provide an overload for your type, or hash the items
    // yourself and insert them first.
    // This is more cumbersome.
    std::vector<CounterType, Allocator<CounterType>> core_;  // nh_ rows of 2^np_ counters
    uint32_t np_, nh_;
    uint64_t mask_;
    uint64_t seedseed_;
    const HasherSetType hf_;
    CounterType *data() {return core_.data();}
    const CounterType *data() const {return core_.data();}
    // TODO: use a simpler hash function under the assumption that it doesn't matter?
    size_t size() const {return core_.size();}
public:
    cs4wbase_t(unsigned np, unsigned nh=1, unsigned seedseed=137):
        np_(np), nh_(nh), mask_((1ull << np_) - 1), seedseed_(seedseed), hf_(nh_, seedseed)
    {
        assert(hf_.size() == nh_);
        // Force an odd row count so the median is a single element.
        // NOTE(review): hf_ was constructed with the original nh_, so after
        // this bump hf_.size() may be nh_ - 1 — confirm HasherSet handles
        // the extra row index.
        nh_ += (nh % 2 == 0);
        core_.resize(nh_ << np_);
        POST_REQ(core_.size() == (nh_ << np_), "core must be properly sized");
    }
    double l2est() const {
        return sqrl2(core_, nh_, np_);
    }
    // Insert and return the (median) updated estimate.
    CounterType addh_val(uint64_t val) {
        std::vector<CounterType> counts(nh_);
        auto cptr = counts.data();
        for(unsigned added = 0; added < nh_; ++added)
            cptr[added] = add(val, added);
        return median(cptr, nh_);
    }
    auto addh(uint64_t val) {return addh_val(val);}
    auto nhashes() const {return nh_;}
    auto p() const {return np_;}
    template<typename T, typename=std::enable_if_t<std::is_arithmetic<T>::value>>
    auto addh_val(T x) {
        uint64_t hv = hf_(static_cast<uint64_t>(x));
        return addh_val(hv);
    }
    template<typename T, typename=std::enable_if_t<!std::is_arithmetic<T>::value>>
    auto addh_val(const T &x) {
        uint64_t hv = hf_(x);
        return addh_val(hv);
    }
    template<typename T, typename=std::enable_if_t<std::is_arithmetic<T>::value>>
    auto addh(T x) {return addh_val(static_cast<uint64_t>(x));}
    void subh(uint64_t val) {
        for(unsigned added = 0; added < nh_; ++added)
            sub(val, added);
    }
    auto subh_val(uint64_t val) {
        tmpbuffer<CounterType> counts(nh_);
        auto cptr = counts.get();
        for(unsigned added = 0; added < nh_; ++added)
            cptr[added] = sub(val, added);
        return median(counts.get(), nh_);
    }
    INLINE size_t index(uint64_t hv, unsigned subidx) const noexcept {
        return (hv & mask_) + (subidx << np_);
    }
    // Saturating signed increment; returns the sign-corrected new value.
    INLINE auto add(uint64_t hv, unsigned subidx) noexcept {
        hv = hf_(hv, subidx);
        auto &ref = at_pos(hv, subidx);
        if(ref != std::numeric_limits<CounterType>::max()) // easy branch to predict
            ref += sign(hv);
        return ref * sign(hv);
    }
    INLINE auto sub(uint64_t hv, unsigned subidx) noexcept {
        hv = hf_(hv, subidx);
        auto &ref = at_pos(hv, subidx);
        if(ref != std::numeric_limits<CounterType>::min()) // easy branch to predict
            ref -= sign(hv);
        return ref * sign(hv);
    }
    // Weighted insert (no saturation check, unlike add/sub).
    CounterType update(uint64_t val, const double increment=1.) {
        std::vector<CounterType> counts(nh_);
        auto cptr = counts.data();
        for(unsigned added = 0; added < nh_; ++added) {
            auto hv = hf_(val, added);
            auto &ref = at_pos(hv, added);
            auto shv = sign(hv);
            ref += increment * shv;
            cptr[added] = shv * ref;
        }
        return median(cptr, nh_);
    }
    INLINE auto &at_pos(uint64_t hv, unsigned subidx) noexcept {
        assert(index(hv, subidx) < core_.size() || !std::fprintf(stderr, "hv & mask_: %zu. subidx %d. np: %d. nh: %d. size: %zu\n", size_t(hv&mask_), subidx, np_, nh_, core_.size()));
        return core_[index(hv, subidx)];
    }
    INLINE auto at_pos(uint64_t hv, unsigned subidx) const noexcept {
        assert((hv & mask_) + (subidx << np_) < core_.size());
        return core_[index(hv, subidx)];
    }
    double dot_product(const cs4wbase_t &o) const {
        auto myp = data(), op = o.data();
        common::detail::tmpbuffer<CounterType> mem(nh_);
        auto memp = mem.get();
        const size_t tsz = (1ull << np_);
        double ret = 0.;
        // NOTE(review): several apparent bugs here — the inner loop indexes
        // with `i` (row number) instead of `j`, lmyp/lop always point at
        // row 1 rather than row i, and `ret` accumulates across rows.
        // Confirm intended per-row dot product before use.
        for(unsigned i = 0u; i < nh_; ++i) {
            auto lmyp = myp + tsz, lop = op + tsz;
#if _OPENMP > 201307L
#pragma omp simd
#endif
            for(size_t j = 0; j < tsz; ++j)
                ret += lmyp[i] * lop[i];
            memp[i] = ret;
        }
        return median(memp, nh_);
    }
    // +/-1 drawn from the hash bit just above the index bits.
    INLINE int sign(uint64_t hv) const noexcept {
        return hv & (1ul << np_) ? 1: -1;
    }
    using Space = vec::SIMDTypes<uint64_t>;
    INLINE void subh(Space::VType hv) noexcept {
        hv.for_each([&](auto x) {for(size_t i = 0; i < nh_; sub(x, i++));});
    }
    INLINE void addh(Space::VType hv) noexcept {
        hv.for_each([&](auto x) {for(size_t i = 0; i < nh_; add(x, i++));});
    }
    // Point query: median of sign-corrected bucket values across rows.
    CounterType est_count(uint64_t val) const {
        common::detail::tmpbuffer<CounterType> mem(nh_);
        CounterType *ptr = mem.get(), *p = ptr;
        for(unsigned i = 0; i < nh_; ++i) {
            auto v = hf_(val, i);
            *p++ = at_pos(v, i) * sign(v);
        }
        return median(ptr, nh_);
    }
    // Elementwise sum; SIMD path when both tables are aligned.
    cs4wbase_t &operator+=(const cs4wbase_t &o) {
        precondition_require(o.size() == this->size(), "tables must have the same size\n");
        using OT = typename vec::SIMDTypes<CounterType>::Type;
        using VS = vec::SIMDTypes<CounterType>;
        static constexpr uint32_t lim = ilog2(VS::COUNT);
        if(np_ > lim && VS::aligned(o.data()) && VS::aligned(data())) {
            size_t i = 0;
            do {
                VS::store(reinterpret_cast<OT *>(data() + i),
                          VS::add(VS::load(reinterpret_cast<const OT *>(o.data() + i)),
                                  VS::load(reinterpret_cast<const OT *>(data() + i)))
                );
                i += VS::COUNT;
            } while(i < core_.size());
        } else {
            for(size_t i = 0; i < core_.size(); ++i) core_[i] += o.core_[i];
        }
        return *this;
    }
    cs4wbase_t operator+(const cs4wbase_t &o) const {
        auto tmp = *this;
        tmp += o;
        return tmp;
    }
    cs4wbase_t &operator-=(const cs4wbase_t &o) {
        // TODO: SIMD optimize (but how often is this needed?)
        PREC_REQ(size() == o.size(), "mismatched sizes");
        for(size_t i = 0; i < size(); ++i)
            core_[i] -= o.core_[i];
        return *this;
    }
    cs4wbase_t operator-(const cs4wbase_t &o) const {
        auto tmp = *this;
        tmp -= o;
        return tmp;
    }
    // Shrink to np_ - n index bits, summing aliasing buckets row by row.
    cs4wbase_t fold(int n=1) const {
        PREC_REQ(n >= 1, "n < 0 is meaningless and n = 1 uses a copy instead.");
        PREC_REQ(n <= int(np_), "Can't fold to less than 1");
        cs4wbase_t ret(np_ - n, nh_, seedseed_);
        unsigned destmod = (1ull << ret.p()) - 1;
        // More cache-efficient way to traverse than iterating over the final sketch
        const size_t coresubsz = 1ull << p();
        for(auto h = 0u; h < nh_; ++h) {
            auto destptr = &ret.core_[h << ret.p()];
            auto coreptr = &core_[h << p()];
            for(size_t i = 0; i < coresubsz; ++i)
                destptr[i & destmod] += coreptr[i];
        }
        return ret;
    }
    // Binary deserialization (np_, nh_, seedseed_, then raw counters).
    void read(std::FILE *fp) {
        std::fread(&np_, sizeof(np_), 1, fp);
        std::fread(&nh_, sizeof(nh_), 1, fp);
        std::fread(&seedseed_, sizeof(seedseed_), 1, fp);
        core_.resize(size_t(nh_) << np_);
        std::fread(data(), sizeof(CounterType), core_.size(), fp);
        mask_ = (1ull << np_) - 1;
    }
    void write(std::FILE *fp) const {
        std::fwrite(&np_, sizeof(np_), 1, fp);
        std::fwrite(&nh_, sizeof(nh_), 1, fp);
        std::fwrite(&seedseed_, sizeof(seedseed_), 1, fp);
        std::fwrite(data(), sizeof(CounterType), core_.size(), fp);
    }
    // NOTE(review): a const member calling the mutating read(FILE*) above
    // would not compile if instantiated — presumably should be non-const.
    void read(std::string p) const {
        std::FILE *ofp = std::fopen(p.data(), "rb");
        if(!ofp) throw std::invalid_argument("File not found");
        read(ofp);
        std::fclose(ofp);
    }
    void write(std::string p) const {
        std::FILE *ofp = std::fopen(p.data(), "wb");
        if(!ofp) throw std::invalid_argument("File not found");
        write(ofp);
        std::fclose(ofp);
    }
};

// Count-min-mean sketch; construction currently always throws (unfinished).
template<typename VectorType=DefaultCompactVectorType, typename HashStruct=WangHash>
class cmmbase_t: protected ccmbase_t<update::Increment, VectorType, HashStruct> {
    uint64_t stream_size_;  // total insertions, for the (unimplemented) mean correction
    using BaseType = ccmbase_t<update::Increment, VectorType, HashStruct>;
public:
    cmmbase_t(int nbits, int l2sz, int nhashes=4, uint64_t seed=0): BaseType(nbits, l2sz, nhashes, seed), stream_size_(0) {
        throw NotImplementedError("count min mean sketch not completed.");
    }
    void add(uint64_t val) {this->addh(val);}
    void addh(uint64_t val) {
        ++stream_size_;
        BaseType::addh(val);
    }
    uint64_t est_count(uint64_t val) const {
        return BaseType::est_count(val); // TODO: this (This is just
    }
};

// Wraps a deletion-capable sketch, evicting the oldest item once
// queue_size_ items are held.
template<typename CMType, template<typename...> class QueueContainer=std::deque, typename...Args>
class SlidingWindow {
    using qc = QueueContainer<uint64_t, Args...>;
    qc hashes_;  // insertion history, oldest first
public:
    CMType cm_;
    size_t queue_size_;
    SlidingWindow(size_t queue_size, CMType &&cm, qc &&hashes=qc()):
        hashes_(std::move(hashes)),
        cm_(std::move(cm)),
        queue_size_(queue_size)
    {
    }
    // NOTE(review): push_back only happens once the queue is already full,
    // so a default-constructed (empty) queue never grows and no eviction
    // ever occurs — push_back presumably belongs outside the if; confirm.
    void addh(uint64_t v) {
        cm_.addh(v);
        if(hashes_.size() == queue_size_) {
            cm_.subh(hashes_.front());
            hashes_.pop_front();
            hashes_.push_back(v);
        }
    }
    CMType &sketch() { return cm_; }
    const CMType &sketch() const { return cm_; }
    CMType &&release() { return std::move(cm_); }
};

// Convenience aliases with default template parameters.
using ccm_t = ccmbase_t<>;
using cmm_t = cmmbase_t<>;
using cs_t = csbase_t<>;
using cs4w_t = cs4wbase_t<>;
using pccm_t = ccmbase_t<update::PowerOfTwo>; // power-of-two (approximate) counting

} // namespace cm
} // namespace sketch
comms.h
/* //@HEADER // ***************************************************************************** // // HPCGraph: Graph Computation on High Performance Computing Systems // Copyright (2016) Sandia Corporation // // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, // the U.S. Government retains certain rights in this software. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // 1. Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. Neither the name of the Corporation nor the names of the // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // Questions? Contact George M. 
Slota (gmslota@sandia.gov) // Siva Rajamanickam (srajama@sandia.gov) // Kamesh Madduri (madduri@cse.psu.edu) // // ***************************************************************************** //@HEADER */ #ifndef _COMMS_H_ #define _COMMS_H_ #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <assert.h> #include "dist_graph.h" #include "util.h" extern int procid, nprocs; extern bool verbose, debug, debug2, verify, output; #define MAX_SEND_SIZE 268435456 #define THREAD_QUEUE_SIZE 3072 struct mpi_data_t { int32_t* sendcounts; int32_t* recvcounts; int32_t* sdispls; int32_t* rdispls; int32_t* sdispls_cpy; uint64_t* recvcounts_temp; uint64_t* sendcounts_temp; uint64_t* sdispls_temp; uint64_t* rdispls_temp; uint64_t* sdispls_cpy_temp; uint64_t* sendbuf_vert; uint64_t* sendbuf_data; double* sendbuf_data_flt; uint64_t* recvbuf_vert; uint64_t* recvbuf_data; double* recvbuf_data_flt; uint64_t total_recv; uint64_t total_send; uint64_t global_queue_size; } ; struct queue_data_t { uint64_t* queue; uint64_t* queue_next; uint64_t* queue_send; uint64_t queue_size; uint64_t next_size; uint64_t send_size; } ; struct thread_queue_t { int32_t tid; uint64_t* thread_queue; uint64_t* thread_send; uint64_t thread_queue_size; uint64_t thread_send_size; } ; struct thread_comm_t { int32_t tid; bool* v_to_rank; uint64_t* sendcounts_thread; uint64_t* sendbuf_vert_thread; uint64_t* sendbuf_data_thread; double* sendbuf_data_thread_flt; int32_t* sendbuf_rank_thread; uint64_t* thread_starts; uint64_t thread_queue_size; } ; void init_queue_data(dist_graph_t* g, queue_data_t* q); void clear_queue_data(queue_data_t* q); void init_comm_data(mpi_data_t* comm); void clear_comm_data(mpi_data_t* comm); void init_thread_queue(thread_queue_t* tq); void clear_thread_queue(thread_queue_t* tq); void init_thread_comm(thread_comm_t* tc); void clear_thread_comm(thread_comm_t* tc); void init_thread_comm_flt(thread_comm_t* tc); void 
clear_thread_commflt(thread_comm_t* tc); void init_sendbuf_vid_data(mpi_data_t* comm); void init_recvbuf_vid_data(mpi_data_t* comm); void init_sendbuf_vid_data_flt(mpi_data_t* comm); void init_recvbuf_vid_data_flt(mpi_data_t* comm); void clear_recvbuf_vid_data(mpi_data_t* comm); void clear_allbuf_vid_data(mpi_data_t* comm); inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_verts_bicc(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_verts(mpi_data_t* comm); inline void exchange_data(mpi_data_t* comm); inline void exchange_data_flt(mpi_data_t* comm); inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q); inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm); inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index); inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index, uint64_t count_data); inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data); inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3); inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id); inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_queue(thread_queue_t* tq, queue_data_t* q); inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id); inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2); inline void empty_send(thread_queue_t* tq, queue_data_t* q); inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank); inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm, 
uint64_t vertex_id, double data_val, int32_t send_rank); inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm); inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm); inline void exchange_verts(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = q->send_size; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; ++comm->sendcounts[ghost_task]; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t)); if (comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; ++i) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; 
comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next+q->next_size+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t* temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_verts_bicc(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { comm->global_queue_size = 0; uint64_t task_queue_size = q->next_size + q->send_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; for (uint64_t c = 0; c < num_comms; ++c) { uint64_t send_begin = (q->send_size * c) / num_comms; uint64_t send_end = (q->send_size * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = q->send_size; if (send_begin % 2 != 0) send_begin++; if (send_end % 2 != 0) send_end++; for (int32_t i = 0; i < nprocs; ++i) { comm->sendcounts[i] = 0; comm->recvcounts[i] = 0; } for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; comm->sendcounts[ghost_task] += 2; } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; comm->sendbuf_vert = (uint64_t*)malloc((uint64_t)(cur_send+1)*sizeof(uint64_t)); if 
(comm->sendbuf_vert == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (uint64_t i = send_begin; i < send_end; i += 2) { uint64_t ghost_index = q->queue_send[i] - g->n_local; uint64_t ghost_task = g->ghost_tasks[ghost_index]; uint64_t vert = g->ghost_unmap[ghost_index]; uint64_t parent = q->queue_send[i+1]; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = vert; comm->sendbuf_vert[comm->sdispls_cpy[ghost_task]++] = parent; } MPI_Alltoallv(comm->sendbuf_vert, comm->sendcounts, comm->sdispls, MPI_UINT64_T, q->queue_next+q->next_size+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(comm->sendbuf_vert); sum_recv += cur_recv; } q->queue_size = q->next_size + sum_recv; q->next_size = 0; q->send_size = 0; uint64_t* temp = q->queue; q->queue = q->queue_next; q->queue_next = temp; } inline void exchange_vert_data(dist_graph_t* g, mpi_data_t* comm, queue_data_t* q) { for (int32_t i = 0; i < nprocs; ++i) comm->recvcounts_temp[i] = 0; MPI_Alltoall(comm->sendcounts_temp, 1, MPI_UINT64_T, comm->recvcounts_temp, 1, MPI_UINT64_T, MPI_COMM_WORLD); comm->total_recv = 0; for (int i = 0; i < nprocs; ++i) comm->total_recv += comm->recvcounts_temp[i]; comm->recvbuf_vert = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t)); comm->recvbuf_data = (uint64_t*)malloc(comm->total_recv*sizeof(uint64_t)); comm->recvbuf_data_flt = NULL; if (comm->recvbuf_vert == NULL || comm->recvbuf_data == NULL) throw_err("exchange_vert_data() unable to allocate comm buffers", procid); comm->global_queue_size = 0; uint64_t task_queue_size = comm->total_send; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = 
(comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_v == NULL || buf_d == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j]; uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j]; buf_v[comm->sdispls_cpy[i]] = vert; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_data+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_v); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); comm->global_queue_size = 0; task_queue_size = 
comm->total_recv + q->next_size; MPI_Allreduce(&task_queue_size, &comm->global_queue_size, 1, MPI_UINT64_T, MPI_SUM, MPI_COMM_WORLD); q->send_size = 0; } inline void exchange_verts(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_verts() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_v = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_v == NULL) throw_err("exchange_verts(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t vert = comm->sendbuf_vert[comm->sdispls_temp[i]+j]; buf_v[comm->sdispls_cpy[i]++] = vert; } } MPI_Alltoallv(buf_v, comm->sendcounts, comm->sdispls, MPI_UINT64_T, comm->recvbuf_vert+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, 
MPI_COMM_WORLD); free(buf_v); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_verts() success\n", procid); } } inline void exchange_data(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_data() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; uint64_t* buf_d = (uint64_t*)malloc((uint64_t)(cur_send)*sizeof(uint64_t)); if (buf_d == NULL) throw_err("exchange_data(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { uint64_t data = comm->sendbuf_data[comm->sdispls_temp[i]+j]; buf_d[comm->sdispls_cpy[i]++] = data; } } MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_UINT64_T, 
comm->recvbuf_data+sum_recv, comm->recvcounts, comm->rdispls, MPI_UINT64_T, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data() success\n", procid); } } inline void exchange_data_flt(mpi_data_t* comm) { if (debug) { printf("Task %d exchange_data_flt() start\n", procid); } uint64_t num_comms = comm->global_queue_size / (uint64_t)MAX_SEND_SIZE + 1; uint64_t sum_recv = 0; uint64_t sum_send = 0; for (uint64_t c = 0; c < num_comms; ++c) { for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; comm->sendcounts[i] = (int32_t)(send_end - send_begin); assert(comm->sendcounts[i] >= 0); } MPI_Alltoall(comm->sendcounts, 1, MPI_INT32_T, comm->recvcounts, 1, MPI_INT32_T, MPI_COMM_WORLD); comm->sdispls[0] = 0; comm->sdispls_cpy[0] = 0; comm->rdispls[0] = 0; for (int32_t i = 1; i < nprocs; ++i) { comm->sdispls[i] = comm->sdispls[i-1] + comm->sendcounts[i-1]; comm->rdispls[i] = comm->rdispls[i-1] + comm->recvcounts[i-1]; comm->sdispls_cpy[i] = comm->sdispls[i]; } int32_t cur_send = comm->sdispls[nprocs-1] + comm->sendcounts[nprocs-1]; int32_t cur_recv = comm->rdispls[nprocs-1] + comm->recvcounts[nprocs-1]; double* buf_d = (double*)malloc((double)(cur_send)*sizeof(double)); if (buf_d == NULL) throw_err("exchange_data_flt(), unable to allocate comm buffers", procid); for (int32_t i = 0; i < nprocs; ++i) { uint64_t send_begin = (comm->sendcounts_temp[i] * c) / num_comms; uint64_t send_end = (comm->sendcounts_temp[i] * (c + 1)) / num_comms; if (c == (num_comms-1)) send_end = comm->sendcounts_temp[i]; for (uint64_t j = send_begin; j < send_end; ++j) { double data = comm->sendbuf_data_flt[comm->sdispls_temp[i]+j]; buf_d[comm->sdispls_cpy[i]++] = data; } } 
MPI_Alltoallv(buf_d, comm->sendcounts, comm->sdispls, MPI_DOUBLE, comm->recvbuf_data_flt+sum_recv, comm->recvcounts, comm->rdispls, MPI_DOUBLE, MPI_COMM_WORLD); free(buf_d); sum_recv += cur_recv; sum_send += cur_send; } assert(sum_recv == comm->total_recv); assert(sum_send == comm->total_send); if (debug) { printf("Task %d exchange_data_flt() success\n", procid); } } inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index-g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; ++tc->sendcounts_thread[out_rank]; } } } } inline void update_sendcounts_thread(dist_graph_t* g, thread_comm_t* tc, uint64_t vert_index, uint64_t count_data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index-g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; tc->sendcounts_thread[out_rank] += count_data; } } } } inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, 
g->local_unmap[vert_index], data, out_rank); } } } } inline void update_vid_data_queues(dist_graph_t* g, thread_comm_t* tc, mpi_data_t* comm, uint64_t vert_index, uint64_t data1, uint64_t data2, uint64_t data3) { for (int32_t i = 0; i < nprocs; ++i) tc->v_to_rank[i] = false; uint64_t out_degree = out_degree(g, vert_index); uint64_t* outs = out_vertices(g, vert_index); for (uint64_t j = 0; j < out_degree; ++j) { uint64_t out_index = outs[j]; if (out_index >= g->n_local) { int32_t out_rank = g->ghost_tasks[out_index - g->n_local]; if (!tc->v_to_rank[out_rank]) { tc->v_to_rank[out_rank] = true; add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data1, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data2, out_rank); add_vid_data_to_send(tc, comm, g->local_unmap[vert_index], data3, out_rank); } } } } inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id) { tq->thread_queue[tq->thread_queue_size++] = vertex_id; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void add_vid_to_queue(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t vertex_id2) { tq->thread_queue[tq->thread_queue_size++] = vertex_id1; tq->thread_queue[tq->thread_queue_size++] = vertex_id2; if (tq->thread_queue_size == THREAD_QUEUE_SIZE) empty_queue(tq, q); } inline void empty_queue(thread_queue_t* tq, queue_data_t* q) { uint64_t start_offset; #pragma omp atomic capture start_offset = q->next_size += tq->thread_queue_size; start_offset -= tq->thread_queue_size; for (uint64_t i = 0; i < tq->thread_queue_size; ++i) q->queue_next[start_offset + i] = tq->thread_queue[i]; tq->thread_queue_size = 0; } inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id) { tq->thread_send[tq->thread_send_size++] = vertex_id; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void add_vid_to_send(thread_queue_t* tq, queue_data_t* q, uint64_t vertex_id1, uint64_t 
vertex_id2) { tq->thread_send[tq->thread_send_size++] = vertex_id1; tq->thread_send[tq->thread_send_size++] = vertex_id2; if (tq->thread_send_size == THREAD_QUEUE_SIZE) empty_send(tq, q); } inline void empty_send(thread_queue_t* tq, queue_data_t* q) { uint64_t start_offset; #pragma omp atomic capture start_offset = q->send_size += tq->thread_send_size; start_offset -= tq->thread_send_size; for (uint64_t i = 0; i < tq->thread_send_size; ++i) q->queue_send[start_offset + i] = tq->thread_send[i]; tq->thread_send_size = 0; } inline void add_vid_data_to_send(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, uint64_t data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data(tc, comm); } inline void add_vid_data_to_send_flt(thread_comm_t* tc, mpi_data_t* comm, uint64_t vertex_id, double data_val, int32_t send_rank) { tc->sendbuf_vert_thread[tc->thread_queue_size] = vertex_id; tc->sendbuf_data_thread_flt[tc->thread_queue_size] = data_val; tc->sendbuf_rank_thread[tc->thread_queue_size] = send_rank; ++tc->thread_queue_size; ++tc->sendcounts_thread[send_rank]; if (tc->thread_queue_size == THREAD_QUEUE_SIZE) empty_vid_data_flt(tc, comm); } inline void empty_vid_data(thread_comm_t* tc, mpi_data_t* comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 
0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } inline void empty_vid_data_flt(thread_comm_t* tc, mpi_data_t* comm) { for (int32_t i = 0; i < nprocs; ++i) { #pragma omp atomic capture tc->thread_starts[i] = comm->sdispls_cpy_temp[i] += tc->sendcounts_thread[i]; tc->thread_starts[i] -= tc->sendcounts_thread[i]; } for (uint64_t i = 0; i < tc->thread_queue_size; ++i) { int32_t cur_rank = tc->sendbuf_rank_thread[i]; comm->sendbuf_vert[tc->thread_starts[cur_rank]] = tc->sendbuf_vert_thread[i]; comm->sendbuf_data_flt[tc->thread_starts[cur_rank]] = tc->sendbuf_data_thread_flt[i]; ++tc->thread_starts[cur_rank]; } for (int32_t i = 0; i < nprocs; ++i) { tc->thread_starts[i] = 0; tc->sendcounts_thread[i] = 0; } tc->thread_queue_size = 0; } #endif
bug51982.c
// RUN: %libomptarget-compile-generic -O1 && %libomptarget-run-generic
// -O1 to run openmp-opt

// Regression test: atomic increments inside a `parallel for` nested in a
// `target` region must yield exactly one increment per inner iteration.
int main(void) {
  long int total = 0;  // device-accumulated counter, mapped back to the host
  int groups = 12;     // inner (parallel) trip count
  int cells = 5;       // outer trip count, executed on the device
  const long expected = groups * cells;

#pragma omp target map(tofrom : total)
  for (int cell = 0; cell < cells; cell++) {
#pragma omp parallel for
    for (unsigned int grp = 0; grp < groups; grp++) {
#pragma omp atomic
      total += 1;
    }
  }

  // Exit code 0 on success, 1 if any increment was lost.
  return (total == expected) ? 0 : 1;
}
targc-272328.c
// Offload test: maps a global struct and its heap-allocated pointer member
// into a target region (map(to: tt, tt.p1[:10])) and checks the
// device-computed result C against the host-computed reference E.
// Fixes over the original: system headers included with <...> instead of
// "...", malloc result checked, and the allocation freed on all exit paths.
#include <stdlib.h>
#include <stdio.h>

// Global struct with a pointer member; the pointee is attached on the device
// by the tt.p1[:10] map clause below.
struct test_type { int *p1; } tt;

int C[10];  // result array, computed in the target region
int E[10];  // expected values, computed on the host

int main() {
  int i;
  int err = 0;  // exit status: 1 if any element mismatches

  tt.p1 = (int*) malloc(10*sizeof(int));
  if (tt.p1 == NULL)
    return 1;  // allocation failure: nothing to test

  for (i=0; i<10; i++) {
    tt.p1[i] = i+100;
    C[i] = 0;
  }
  // Host reference result.
  for (i=0; i<10; i++)
    E[i] = C[i]+10 + tt.p1[i];

#pragma omp target map(tofrom: C) map(to: tt, tt.p1[:10])
  {
    for (i=0; i<10; i++)
      C[i] = C[i]+10 + tt.p1[i];
  }

  for (i=0; i<10; i++) {
    printf("%d \n", C[i]);
    if (E[i] != C[i])
      err = 1;  // record mismatch; keep printing the full array
  }

  free(tt.p1);
  return err;
}
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ case mshadow::kBfloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
 */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  // Row-major linear index of coord within shape.  The (shape[i] > coord[i])
  // factor zeroes the contribution of a coordinate that is not strictly
  // inside that dimension -- presumably to support broadcast (size-1)
  // dimensions, consistent with calc_stride below; confirm at call sites.
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened (row-major) index given shape;
   inverse of ravel for in-range coordinates. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  // Peel digits off idx from the innermost (last) dimension outward.
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i], using the already-computed quotient
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vectors: sum_i coord[i]*stride[i],
   i.e. the linear offset of coord under the given strides. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: maps a row-major index under `shape` to the
   offset under `stride`, without materializing the coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];  // coordinate in dim i times its stride
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape.  Size-1 dimensions get stride 0
   so that indexing through these strides broadcasts along them. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates (odometer-style, last dim fastest).
   Returns true while the coordinate is still within shape. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  // Propagate carries toward dim 0.
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and keep a strided linear index in sync,
   avoiding a full dot() recomputation per step. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    // Carry: undo the full sweep of dim i, advance dim i-1.
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and keep two strided linear indices in sync
   (e.g. one per operand with differing broadcast strides). */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another, casting element types
 *        when the two blobs' type flags differ.
 * \param to Destination blob (same element count and device as `from`)
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      // Same dtype: raw copy.
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // Different dtype: elementwise cast-assign.
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
\brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } /*! \brief input is a tensor and the output is a boolean tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and two scalar value with a boolean output tensor */ template<typename DType, typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } #ifndef _WIN32 /*! 
\brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief inputs are two tensors with a half_t output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out, const DType *lhs, const mshadow::half::half_t value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } /*! 
\brief inputs are two tensors with a double output tensor */ template<typename DType, typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value || std::is_same<DType, float>::value || std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value)); } #endif /*! \brief inputs are two tensors with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is a tensor and a scalar value with a float output tensor */ template<typename DType, typename std::enable_if<std::is_integral<DType>::value, int>::type = 0> MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! 
* \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... 
// (continuation of Kernel<OP, gpu>::LaunchEx, whose signature begins above)
args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    // Enough blocks to give one thread per element, capped at kMaxGridNum.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  // NOTE(review): returns int (0/1) rather than bool, mirroring set_to_int.
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to true and false
 */
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
LG_CC_FastSV5_64.c
//------------------------------------------------------------------------------ // LG_CC_FastSV5_64: connected components (64-bit version) //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause //------------------------------------------------------------------------------ // Code is based on the algorithm described in the following paper // Zhang, Azad, Hu. FastSV: FastSV: A Distributed-Memory Connected Component // Algorithm with Fast Convergence (SIAM PP20) // A subsequent update to the algorithm is here (which might not be reflected // in this code): // // Yongzhe Zhang, Ariful Azad, Aydin Buluc: Parallel algorithms for finding // connected components using linear algebra. J. Parallel Distributed Comput. // 144: 14-27 (2020). // Modified by Tim Davis, Texas A&M University // The input matrix A must be symmetric. Self-edges (diagonal entries) are // OK, and are ignored. The values and type of A are ignored; just its // structure is accessed. // todo: this function is not thread-safe, since it exports G->A and then // reimports it back. G->A is unchanged when the function returns, but during // execution G->A is invalid. #define LAGraph_FREE_ALL ; #include "LG_internal.h" #if !LG_VANILLA #if (! LG_SUITESPARSE ) #error "SuiteSparse:GraphBLAS v6.0.0 or later required" #endif //------------------------------------------------------------------------------ // hash functions: todo describe me //------------------------------------------------------------------------------ // hash table size must be a power of 2 #define HASH_SIZE 1024 // number of samples to insert into the hash table // todo: this seems to be a lot of entries for a HASH_SIZE of 1024. // There could be lots of collisions. 
#define HASH_SAMPLES 864 #define HASH(x) (((x << 4) + x) & (HASH_SIZE-1)) #define NEXT(x) ((x + 23) & (HASH_SIZE-1)) //------------------------------------------------------------------------------ // ht_init: todo describe me //------------------------------------------------------------------------------ // Clear the hash table counts (ht_val [0:HASH_SIZE-1] = 0), and set all hash // table entries as empty (ht_key [0:HASH_SIZE-1] =-1). // todo: the memset of ht_key is confusing // todo: the name "ht_val" is confusing. It is not a value, but a count of // the number of times the value x = ht_key [h] has been inserted into the // hth position in the hash table. It should be renamed ht_cnt. static inline void ht_init ( int64_t *ht_key, int64_t *ht_val ) { memset (ht_key, -1, sizeof (int64_t) * HASH_SIZE) ; memset (ht_val, 0, sizeof (int64_t) * HASH_SIZE) ; } //------------------------------------------------------------------------------ // ht_sample: todo describe me //------------------------------------------------------------------------------ // static inline void ht_sample ( uint64_t *V, // array of size n (todo: this is a bad variable name) int64_t n, int64_t samples, // number of samples to take from V int64_t *ht_key, int64_t *ht_val, uint64_t *seed ) { for (int64_t k = 0 ; k < samples ; k++) { // select an entry from V at random int64_t x = V [LAGraph_Random60 (seed) % n] ; // find x in the hash table // todo: make this loop a static inline function (see also below) int64_t h = HASH (x) ; while (ht_key [h] != -1 && ht_key [h] != x) { h = NEXT (h) ; } ht_key [h] = x ; ht_val [h]++ ; } } //------------------------------------------------------------------------------ // ht_most_frequent: todo describe me //------------------------------------------------------------------------------ // todo what if key is returned as -1? Code breaks. 
// todo: handle this case

// Scan the whole hash table and return the key with the largest count
// (ht_val).  Returns -1 if the table is empty (all counts zero).

static inline int64_t ht_most_frequent
(
    int64_t *ht_key,
    int64_t *ht_val
)
{
    int64_t key = -1 ;
    int64_t val = 0 ;           // max (ht_val [0:HASH_SIZE-1])
    for (int64_t h = 0 ; h < HASH_SIZE ; h++)
    {
        if (ht_val [h] > val)
        {
            key = ht_key [h] ;
            val = ht_val [h] ;
        }
    }
    return (key) ;              // return most frequent key
}

//------------------------------------------------------------------------------
// Reduce_assign: w (index) += s, using MIN as the "+=" accum operator
//------------------------------------------------------------------------------

// The index array, of size n can have duplicates.  The vectors w and s are
// full (all entries present).  This function computes:
//
//      for (j = 0 ; j < n ; j++)
//      {
//          uint64_t i = index [j] ;
//          w [i] = min (w [i], s [j]) ;
//      }
//
// If C(i,j) = true where i == index [j], then this can be written with the
// min_second semiring:
//
//      w = min (w, C*s)

// The caller owns Cp, Ci, and Cx; they are temporarily moved into C via
// pack_CSC, used for the mxv, and then returned to the caller via unpack_CSC.

static inline int Reduce_assign
(
    GrB_Vector w,           // vector of size n, all entries present
    GrB_Vector s,           // vector of size n, all entries present
    GrB_Matrix C,           // boolean matrix of size n-by-n
    GrB_Index **Cp_handle,  // array of size n+1, equal to 0:n
    GrB_Index **Ci_handle,  // index array of size n, can have duplicates
    bool **Cx_handle,       // array of size 1, equal to true
    char *msg
)
{
    // size of Cp, Ci, and Cx in bytes
    GrB_Index n ;
    GrB_TRY (GrB_Vector_size (&n, w)) ;
    GrB_Index Cp_size = (n+1) * sizeof (GrB_Index) ;
    GrB_Index Ci_size = n * sizeof (GrB_Index) ;
    GrB_Index Cx_size = sizeof (bool) ;

    // pack Cp, Ci, and Cx into a matrix C with C(i,j) = true if Ci(j) == i
    bool iso = true ;
    bool jumbled = false ;
    GrB_TRY (GxB_Matrix_pack_CSC (C, Cp_handle, Ci_handle, (void **) Cx_handle,
        Cp_size, Ci_size, Cx_size, iso, jumbled, NULL)) ;

    // w = min (w, C*s) using the MIN_SECOND semiring
    GrB_TRY (GrB_mxv (w, NULL, GrB_MIN_UINT64,
        GrB_MIN_SECOND_SEMIRING_UINT64, C, s, NULL)) ;

    // unpack the contents of C, returning ownership to the caller
    GrB_TRY (GxB_Matrix_unpack_CSC (C, Cp_handle, Ci_handle, (void **)Cx_handle,
        &Cp_size, &Ci_size, &Cx_size, &iso, &jumbled, NULL)) ;

    return (GrB_SUCCESS) ;      // yay! It works!
}

//------------------------------------------------------------------------------
// LG_CC_FastSV5_64
//------------------------------------------------------------------------------

// The output of LG_CC_FastSV5 is a vector component, where
// component(i)=s if node i is in the connected compononent whose
// representative node is node s.  If s is a representative, then
// component(s)=s.  The number of connected components in the graph G is the
// number of representatives.

#undef  LAGraph_FREE_ALL
#define LAGraph_FREE_ALL                \
{                                       \
    LAGraph_Free ((void **) &Cp) ;      \
    LAGraph_Free ((void **) &Cx) ;      \
    LAGraph_Free ((void **) &V) ;       \
    LAGraph_Free ((void **) &ht_key) ;  \
    LAGraph_Free ((void **) &ht_val) ;  \
    /* todo why is T not freed?? */     \
    GrB_free (&t) ;                     \
    GrB_free (&f) ;                     \
    GrB_free (&gp) ;                    \
    GrB_free (&mngp) ;                  \
    GrB_free (&gp_new) ;                \
    GrB_free (&mod) ;                   \
}

#endif

int LG_CC_FastSV5_64        // SuiteSparse:GraphBLAS method, with GxB extensions
(
    // output
    GrB_Vector *component,  // component(i)=s if node is in the component s
    // inputs
    LAGraph_Graph G,        // input graph, G->A can change
    char *msg
)
{
#if LG_VANILLA
    LG_CHECK (0, -1, "SuiteSparse required for this method") ;
#else

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    uint64_t *V = NULL ;
    int64_t *ht_key = NULL, *ht_val = NULL ;
    GrB_Index n, nnz ;
    GrB_Vector f = NULL, gp_new = NULL, mngp = NULL, mod = NULL, gp = NULL,
        t = NULL ;
    GrB_Matrix T = NULL, C = NULL ;
    GrB_Index *Cp = NULL ;
    GrB_Index Cp_size = 0 ;
    bool *Cx = NULL ;

    LG_CHECK (LAGraph_CheckGraph (G, msg), -1, "graph is invalid") ;
    LG_CHECK (component == NULL, -1, "component parameter is NULL") ;

    // the method requires a symmetric structure; either an undirected graph,
    // or a directed graph known to have a symmetric structure
    if (G->kind == LAGRAPH_ADJACENCY_UNDIRECTED ||
       (G->kind == LAGRAPH_ADJACENCY_DIRECTED &&
        G->A_structure_is_symmetric == LAGRAPH_TRUE))
    {
        // A must be symmetric
        ;
    }
    else
    {
        // A must not be unsymmetric
        LG_CHECK (false, -1, "input must be symmetric") ;
    }

    GrB_Matrix S = G->A ;
    GrB_TRY (GrB_Matrix_nrows (&n, S)) ;
    GrB_TRY (GrB_Matrix_nvals (&nnz, S)) ;

    // the sample phase is only worthwhile if the graph is dense enough:
    // average degree above 2*FASTSV_SAMPLES
    #define FASTSV_SAMPLES 4
    bool sampling = (n * FASTSV_SAMPLES * 2 < nnz) ;

    // random number seed
    uint64_t seed = n ;

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // determine # of threads to use
    int nthreads ;
    LAGraph_TRY (LAGraph_GetNumThreads (&nthreads, NULL)) ;
    nthreads = LAGraph_MIN (nthreads, n / 16) ;
    nthreads = LAGraph_MAX (nthreads, 1) ;

    // vectors
    GrB_TRY (GrB_Vector_new (&f, GrB_UINT64, n)) ;
    GrB_TRY (GrB_Vector_new (&gp_new, GrB_UINT64, n)) ;
    GrB_TRY (GrB_Vector_new (&mod, GrB_BOOL, n)) ;

    V = LAGraph_Malloc (n, sizeof (uint64_t)) ;

    // f = 0:n-1 (each node is its own parent), and V = f as a C array
    GrB_TRY (GrB_assign (f, NULL, NULL, 0, GrB_ALL, n, NULL)) ;
    GrB_TRY (GrB_apply (f, NULL, NULL, GrB_ROWINDEX_INT64, f, 0, NULL)) ;
    GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;

    GrB_TRY (GrB_Vector_dup (&gp, f)) ;     // grandparents
    GrB_TRY (GrB_Vector_dup (&mngp, f)) ;   // min neighbor grandparent

    // allocate the hash table
    ht_key = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
    ht_val = LAGraph_Malloc (HASH_SIZE, sizeof (int64_t)) ;
    LG_CHECK (ht_key == NULL || ht_val == NULL, -1, "out of memory") ;

    // create Cp = 0:n, and Cx = true, and the empty C matrix, for use in
    // Reduce_assign (a temporary vector t is used to build Cp = 0:n)
    GrB_TRY (GrB_Vector_new (&t, GrB_INT64, n+1)) ;
    GrB_TRY (GrB_assign (t, NULL, NULL, 0, GrB_ALL, n+1, NULL)) ;
    GrB_TRY (GrB_apply (t, NULL, NULL, GrB_ROWINDEX_INT64, t, 0, NULL)) ;
    GrB_TRY (GxB_Vector_unpack_Full (t, (void **) &Cp, &Cp_size, NULL, NULL)) ;
    Cx = (bool *) LAGraph_Malloc (1, sizeof (bool)) ;
    Cx [0] = true ;
    GrB_TRY (GrB_free (&t)) ;
    GrB_TRY (GrB_Matrix_new (&C, GrB_BOOL, n, n)) ;

    //--------------------------------------------------------------------------
    // sample phase
    //--------------------------------------------------------------------------

    if (sampling)
    {

        //----------------------------------------------------------------------
        // export S = G->A in CSR format
        //----------------------------------------------------------------------

        // S is not modified.  It is only exported so that its contents can be
        // read by the parallel loops below.

        GrB_Type type ;
        GrB_Index nrows, ncols, nvals ;
        size_t typesize ;
        int64_t nonempty ;
        GrB_Index *Sp, *Sj ;
        void *Sx ;
        bool S_jumbled = false ;
        GrB_Index Sp_size, Sj_size, Sx_size ;
        bool S_iso = false ;
        GrB_TRY (GrB_Matrix_nvals (&nvals, S)) ;
        GrB_TRY (GxB_Matrix_export_CSR (&S, &type, &nrows, &ncols,
            &Sp, &Sj, &Sx, &Sp_size, &Sj_size, &Sx_size,
            &S_iso, &S_jumbled, NULL)) ;
        GrB_TRY (GxB_Type_size (&typesize, type)) ;
        // G->A is invalid until S is imported back below
        G->A = NULL ;

        //----------------------------------------------------------------------
        // allocate space to construct T
        //----------------------------------------------------------------------

        GrB_Index Tp_len = nrows+1, Tp_size = Tp_len*sizeof(GrB_Index);
        GrB_Index Tj_len = nvals,   Tj_size = Tj_len*sizeof(GrB_Index);
        GrB_Index Tx_len = nvals ;
        GrB_Index *Tp = LAGraph_Malloc (Tp_len, sizeof (GrB_Index)) ;
        GrB_Index *Tj = LAGraph_Malloc (Tj_len, sizeof (GrB_Index)) ;
        GrB_Index Tx_size = typesize ;
        void *Tx = LAGraph_Calloc (1, typesize) ;   // T is iso
        // todo check out-of-memory conditions

        //----------------------------------------------------------------------
        // allocate workspace
        //----------------------------------------------------------------------

        int64_t *range = LAGraph_Malloc (nthreads + 1, sizeof (int64_t)) ;
        GrB_Index *count = LAGraph_Malloc (nthreads + 1, sizeof (GrB_Index)) ;
        // todo check out-of-memory conditions
        memset (count, 0, sizeof (GrB_Index) * (nthreads + 1)) ;

        //----------------------------------------------------------------------
        // define parallel tasks to construct T
        //----------------------------------------------------------------------

        // thread tid works on rows range[tid]:range[tid+1]-1 of S and T

        for (int tid = 0 ; tid <= nthreads ; tid++)
        {
            range [tid] = (n * tid + nthreads - 1) / nthreads ;
        }

        //----------------------------------------------------------------------
        // determine the number entries to be constructed in T for each thread
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                int64_t deg = Sp [i + 1] - Sp [i] ;
                count [tid + 1] += LAGraph_MIN (FASTSV_SAMPLES, deg) ;
            }
        }

        //----------------------------------------------------------------------
        // count = cumsum (count)
        //----------------------------------------------------------------------

        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            count [tid + 1] += count [tid] ;
        }

        //----------------------------------------------------------------------
        // construct T
        //----------------------------------------------------------------------

        // T (i,:) consists of the first FASTSV_SAMPLES of S (i,:).

        // todo: this could be done by GxB_Select, using a new operator.  Need
        // to define a set of GxB_SelectOp operators that would allow for this.

        // Note that Tx is not modified.  Only Tp and Tj are constructed.

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index p = count [tid] ;
            Tp [range [tid]] = p ;
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                // construct T (i,:) from the first entries in S (i,:)
                for (int64_t j = 0 ;
                    j < FASTSV_SAMPLES && Sp [i] + j < Sp [i + 1] ; j++)
                {
                    Tj [p++] = Sj [Sp [i] + j] ;
                }
                Tp [i + 1] = p ;
            }
        }

        //----------------------------------------------------------------------
        // import the result into the GrB_Matrix T
        //----------------------------------------------------------------------

        // Note that Tx is unmodified.

        // in SuiteSparse:GraphBLAS v5, sizes are in bytes, not entries
        GrB_Index Tp_siz = Tp_size ;
        GrB_Index Tj_siz = Tj_size ;
        GrB_Index Tx_siz = Tx_size ;

        GrB_Index t_nvals = Tp [nrows] ;
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            true,   // T is iso
            S_jumbled, NULL)) ;

        //----------------------------------------------------------------------
        // find the connected components of T
        //----------------------------------------------------------------------

        // todo: this is nearly identical to the final phase below.
        // Make this a function

        bool change = true, is_first = true ;
        while (change)
        {
            // hooking & shortcutting
            // mngp = min (mngp, T*gp) using the MIN_SECOND semiring
            GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
                GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;
            if (!is_first)
            {
                // f = min (f, C*mngp) where C is C(i,j) = true if i=V(j)
                LAGraph_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;
            }

            // f = min (f, mngp, gp)
            GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
                mngp, gp, NULL)) ;

            // calculate grandparent: gp_new = f (f)
            GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
            GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;

            // terminate if gp and gb_new are the same
            GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new,
                gp, NULL)) ;
            GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod,
                NULL)) ;

            // swap gp and gp_new
            GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
            is_first = false ;
        }

        //----------------------------------------------------------------------
        // estimate the largest component: sample V and take the most
        // frequent component id found in the sample
        //----------------------------------------------------------------------

        ht_init (ht_key, ht_val) ;
        ht_sample (V, n, HASH_SAMPLES, ht_key, ht_val, &seed) ;
        int64_t key = ht_most_frequent (ht_key, ht_val) ;
        // todo: what if key is returned as -1?  Then T below is invalid.

        int64_t t_nonempty = -1 ;
        bool T_jumbled = false, T_iso = true ;

        // export T
        GrB_TRY (GxB_Matrix_export_CSR (&T, &type, &nrows, &ncols,
            &Tp, &Tj, &Tx, &Tp_siz, &Tj_siz, &Tx_siz,
            &T_iso, &T_jumbled, NULL)) ;

        // todo what is this phase doing?  It is constructing a matrix T that
        // depends only on S, key, and V.  T contains a subset of the entries
        // in S, except that T (i,:) is empty if

        // The prior content of T is ignored; it is exported from the earlier
        // phase, only to reuse the allocated space for T.  However, T_jumbled
        // is preserved from the prior matrix T, which doesn't make sense.

        // This parallel loop is badly load balanced.  Each thread operates on
        // the same number of rows of S, regardless of how many entries appear
        // in each set of rows.  It uses one thread per task, statically
        // scheduled.

        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Sp [range [tid]] ;
            // thread tid scans S (range [tid]:range [tid+1]-1,:),
            // and constructs T(i,:) for all rows in this range.
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                // pv = V [i] is the current component id of node i
                int64_t pv = V [i] ;
                Tp [i] = ptr ;      // start the construction of T(i,:)
                // T(i,:) is empty if pv == key
                if (pv != key)
                {
                    // scan S(i,:)
                    for (GrB_Index p = Sp [i] ; p < Sp [i+1] ; p++)
                    {
                        // get S(i,j)
                        int64_t j = Sj [p] ;
                        if (V [j] != key)
                        {
                            // add the entry T(i,j) to T, but skip it if
                            // V [j] is equal to key
                            Tj [ptr++] = j ;
                        }
                    }
                    // add the entry T(i,key) if there is room for it in T(i,:)
                    if (ptr - Tp [i] < Sp [i+1] - Sp [i])
                    {
                        Tj [ptr++] = key ;
                    }
                }
            }
            // count the number of entries inserted into T by this thread?
            count [tid] = ptr - Tp [range [tid]] ;
        }

        // Compact empty space out of Tj not filled in from the above phase.
        // This is a lot of work and should be done in parallel.
        GrB_Index offset = 0 ;
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            memcpy (Tj + offset, Tj + Tp [range [tid]],
                sizeof (GrB_Index) * count [tid]) ;
            offset += count [tid] ;
            count [tid] = offset - count [tid] ;
        }

        // Compact empty space out of Tp
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int tid = 0 ; tid < nthreads ; tid++)
        {
            GrB_Index ptr = Tp [range [tid]] ;
            for (int64_t i = range [tid] ; i < range [tid+1] ; i++)
            {
                Tp [i] -= ptr - count [tid] ;
            }
        }

        // finalize T
        Tp [n] = offset ;

        // free workspace
        LAGraph_Free ((void **) &count) ;
        LAGraph_Free ((void **) &range) ;

        // import S (unchanged since last export)
        GrB_TRY (GxB_Matrix_import_CSR (&S, type, nrows, ncols,
            &Sp, &Sj, &Sx, Sp_size, Sj_size, Sx_size,
            S_iso, S_jumbled, NULL)) ;

        // import T for the final phase
        GrB_TRY (GxB_Matrix_import_CSR (&T, type, nrows, ncols,
            &Tp, &Tj, &Tx, Tp_siz, Tj_siz, Tx_siz,
            T_iso, T_jumbled, NULL)) ;

        // restore G->A
        G->A = S ;
    }
    else
    {
        // no sampling; the final phase operates on the whole graph
        T = S ;
    }

    //--------------------------------------------------------------------------
    // final phase
    //--------------------------------------------------------------------------

    GrB_TRY (GrB_Matrix_nvals (&nnz, T)) ;

    bool change = true ;
    while (change && nnz > 0)
    {
        // hooking & shortcutting
        // mngp = min (mngp, T*gp) using the MIN_SECOND semiring
        GrB_TRY (GrB_mxv (mngp, NULL, GrB_MIN_UINT64,
            GrB_MIN_SECOND_SEMIRING_UINT64, T, gp, NULL)) ;

        // f = min (f, C*mngp) where C is C(i,j) = true if i=V(j)
        GrB_TRY (Reduce_assign (f, mngp, C, &Cp, &V, &Cx, msg)) ;

        // f = min (f, mngp, gp)
        GrB_TRY (GrB_eWiseAdd (f, NULL, GrB_MIN_UINT64, GrB_MIN_UINT64,
            mngp, gp, NULL)) ;

        // calculate grandparent: gp_new = f (f)
        GrB_TRY (GrB_Vector_extractTuples (NULL, V, &n, f)) ;
        GrB_TRY (GrB_extract (gp_new, NULL, NULL, f, V, n, NULL)) ;

        // terminate if gp and gb_new are the same
        GrB_TRY (GrB_eWiseMult (mod, NULL, NULL, GrB_NE_UINT64, gp_new, gp,
            NULL)) ;
        GrB_TRY (GrB_reduce (&change, NULL, GrB_LOR_MONOID_BOOL, mod, NULL)) ;

        // swap gp and gp_new
        GrB_Vector t = gp ; gp = gp_new ; gp_new = t ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    (*component) = f ;
    f = NULL ;      // f is now owned by the caller; do not free it below
    if (sampling)
    {
        GrB_free (&T) ;
    }
    LAGraph_FREE_ALL ;
    return (0) ;
#endif
}
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring 
less
%  intensely near image edges and more intensely far from edges. We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A zero sigma means no blurring; return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    kernel[w] is a normalized Gaussian of width (width-w); only even
    offsets w are populated (odd widths keep a well-defined center).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t
      j,
      k,
      u,
      v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    /*
      Fold any residual weight into the center tap so the kernel sums to 1.
    */
    kernel[w][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel (index j, clamped to [0,width] and forced even)
        from the local edge strength of this pixel.
      */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        center is the offset of the central pixel of the (width-j)x(width-j)
        neighborhood within p.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely far from edges.
We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A zero sigma means no sharpening; return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    kernel[w] is a Laplacian-of-Gaussian-style sharpening kernel of width
    (width-w); only even offsets w are populated (odd widths keep a
    well-defined center).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (w=0; w < (ssize_t) width; w+=2)
  {
    ssize_t
      j,
      k,
      u,
      v;

    kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-w),(width-w)*sizeof(**kernel)));
    if (kernel[w] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-w-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[w][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[w][k];
        k++;
      }
    }
    /*
      Negative surround taps plus a dominant positive center tap sharpen.
    */
    kernel[w][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[w][(k-1)/2]=1.0;
  }
  if (w < (ssize_t) width)
    {
      for (w-=2; w >= 0; w-=2)
        kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    const Quantum
      *magick_restrict r;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel (index j, clamped to [0,width] and forced even)
        from the local edge strength of this pixel.
      */
      j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5));
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      /*
        center is the offset of the central pixel of the (width-j)x(width-j)
        neighborhood within p.
      */
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        const double
          *magick_restrict k;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (w=0; w < (ssize_t) width; w+=2)
    kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.  We convolve the image with a Gaussian operator
%  of the given radius and standard deviation (sigma).  For reasonable results,
%  the radius should be larger than sigma.
Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when one is available. */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /*
    Separate the 2D Gaussian into two 1D passes: a horizontal blur followed
    by the same blur rotated 90 degrees.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l a t e r a l B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
%  smoothing filter for images.
It replaces the intensity of each pixel with
%  a weighted average of intensity values from nearby pixels.  This weight is
%  based on a Gaussian distribution.  The weights depend not only on Euclidean
%  distance of pixels, but also on the radiometric differences (e.g., range
%  differences, such as color intensity, depth distance, etc.).  This preserves
%  sharp edges.
%
%  The format of the BilateralBlurImage method is:
%
%      Image *BilateralBlurImage(const Image *image,const size_t width,
%        const size_t height,const double intensity_sigma,
%        const double spatial_sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the neighborhood in pixels.
%
%    o height: the height of the neighborhood in pixels.
%
%    o intensity_sigma: sigma in the intensity space.  A larger value means
%      that farther colors within the pixel neighborhood (see spatial_sigma)
%      will be mixed together, resulting in larger areas of semi-equal color.
%
%    o spatial_sigma: sigma in the coordinate space.  A larger value means that
%      farther pixels influence each other as long as their colors are close
%      enough (see intensity_sigma).  When the neighborhood diameter is greater
%      than zero, it specifies the neighborhood size regardless of
%      spatial_sigma.  Otherwise, the neighborhood diameter is proportional to
%      spatial_sigma.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double BlurDistance(const ssize_t x,const ssize_t y, const ssize_t u,const ssize_t v) { return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v))); } static inline double BlurGaussian(const double x,const double sigma) { return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))* PerceptibleReciprocal(Magick2PI*sigma*sigma)); } static double **DestroyBilateralTLS(const ssize_t number_threads, double **weights) { ssize_t i; assert(weights != (double **) NULL); for (i=0; i <= (ssize_t) number_threads; i++) if (weights[i] != (double *) NULL) weights[i]=(double *) RelinquishMagickMemory(weights[i]); weights=(double **) RelinquishMagickMemory(weights); return(weights); } static double **AcquireBilateralTLS(const size_t number_threads, const size_t width,const size_t height) { double **weights; ssize_t i; weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights)); if (weights == (double **) NULL) return((double **) NULL); (void) memset(weights,0,number_threads*sizeof(*weights)); for (i=0; i <= (ssize_t) number_threads; i++) { weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights)); if (weights[i] == (double *) NULL) return(DestroyBilateralTLS(number_threads,weights)); } return(weights); } MagickExport Image *BilateralBlurImage(const Image *image,const size_t width, const size_t height,const double intensity_sigma,const double spatial_sigma, ExceptionInfo *exception) { #define MaxIntensity (255) #define BilateralBlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double intensity_gaussian[2*(MaxIntensity+1)], *spatial_gaussian, **weights; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo mid; ssize_t number_threads, w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) 
LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } number_threads=(size_t) GetMagickResourceLimit(ThreadResource); weights=AcquireBilateralTLS(number_threads,MagickMax(width,1), MagickMax(height,1)); if (weights == (double **) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (w=(-MaxIntensity); w < MaxIntensity; w++) intensity_gaussian[w+MaxIntensity]=BlurGaussian((double) w,intensity_sigma); spatial_gaussian=weights[number_threads]; { ssize_t n, v; n=0; mid.x=(ssize_t) (MagickMax(width,1)/2L); mid.y=(ssize_t) (MagickMax(height,1)/2L); for (v=0; v < (ssize_t) MagickMax(height,1); v++) { ssize_t u; for (u=0; u < (ssize_t) MagickMax(width,1); u++) spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y), spatial_sigma); } } /* Bilateral blur image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; const Quantum *magick_restrict p, *magick_restrict r; ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. 
*/ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,MagickMax(width,1), MagickMax(height,1),exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*MagickMax(width,1)*mid.y+ GetPixelChannels(image)*mid.x; n=0; for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) MagickMax(width,1)* (mid.y-v)+GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { r=p+(ssize_t) GetPixelChannels(image)*MagickMax(width,1)* (mid.y-v)+GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) MagickMax(height,1); v++) { for (u=0; u < (ssize_t) MagickMax(width,1); u++) { double alpha, beta; r=p+(ssize_t) GetPixelChannels(image)*MagickMax(width,1)*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]*alpha*beta; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); } q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BilateralBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); weights=DestroyBilateralTLS(number_threads,weights); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated convolution when one is available. */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /* Convolution is a single-iteration morphology with a convolve method. */
  convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info,
    exception);
  return(convolve_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull(): one pass of the Crimmins complementary hulling operator.  f and g
  are (columns+2) x (rows+2) buffers with a one-pixel border; (x_offset,
  y_offset) selects the neighbor direction and polarity selects raising
  (> 0) or lowering (< 0) of pixels relative to that neighbor.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Skip the one-pixel top border row; r points at the offset neighbor. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /* (2*y+1)+y*columns skips the left/right border pixels of each row. */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* Second pass: compare against both the neighbor and its opposite. */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /* The four hulling directions: S, E, SE, SW. */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both buffers carry a one-pixel border on every
    side, hence (columns+2) x (rows+2).
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* Copy this channel into the bordered work buffer. */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      For each direction: hull forward and backward with positive polarity
      (raise dark speckles), then both again with negative polarity (lower
      bright speckles).
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* Write the despeckled channel back into the destination image. */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Build the kernel by hand rather than from a geometry string. */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Laplacian-style edge kernel: all taps -1.0 with a center tap of
    width*height-1 so the kernel sums to zero on flat regions.
  */
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]=(-1.0);
  kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional (signed) Gaussian kernel: taps above/left of the
    anti-diagonal (tracked by k, decremented per row) get +8, those below
    get -8, all zero except along u == k — this yields the relief effect.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ?
        -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* Normalize so the kernel taps sum to one. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* A single true 2D Gaussian kernel (unlike BlurImage's two 1D passes). */
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Rec. 709 luma of a mean-pixel vector indexed via the image channel map. */
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  width=(size_t) radius+1;
  /* Pre-smooth the image; quadrant statistics are taken from this copy. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four width x width quadrants that meet at (x,y): NW, NE,
        SW, and SE; the output pixel is taken from the least-variance one.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean of the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about its mean. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* A quadrant read failed: abandon this row. */
          status=MagickFalse;
          break;
        }
      /* Sample the winning quadrant at its center. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag "LocalContrast/Image"

  CacheView *image_view, *contrast_view;
  float *interImage, *scanline, totalWeight;
  Image *contrast_image;
  MagickBooleanType status;
  MemoryInfo *scanline_info, *interImage_info;
  ssize_t scanLineSize, width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    radius is a percentage: 100% maps to a blur half-width of 20% of the
    largest image dimension.  Each scanline is padded by width on both ends.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int id = GetOpenMPThreadId();

      const Quantum *magick_restrict p;
      float *out, *pix, *pixels;
      ssize_t y;
      ssize_t i;

      if (status == MagickFalse)
        continue;
      /*
        Each thread gets its own padded scanline of luma values.
      */
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float sum, weight;

        /* triangular (tent) weights: ramp up to width, then back down */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int id = GetOpenMPThreadId();

      const Quantum *magick_restrict p;
      float *pix, *pixels;
      Quantum *magick_restrict q;
      ssize_t x;
      ssize_t i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /*
        Copy the vertically-blurred row (with padding) into this thread's
        scanline.
      */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float mult, srcVal, sum, weight;

        PixelTrait traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /*
          Apply and write: scale each channel by the ratio of the sharpened
          luma to the original luma (unsharp-mask style, driven by strength%).
        */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%    Image *MotionBlurImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMotionBlurKernel() returns a normalized 1-D Gaussian convolution kernel
  of the requested width, or NULL if the aligned allocation fails.
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType *kernel, normalize;

  ssize_t i;

  /*
    Generate a 1-D convolution kernel.
*/
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  /* normalize so the kernel weights sum to 1 */
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag "Blur/Image"

  CacheView *blur_view, *image_view, *motion_view;
  Image *blur_image;
  MagickBooleanType status;
  MagickOffsetType progress;
  MagickRealType *kernel;
  OffsetInfo *offset;
  PointInfo point;
  size_t width;
  ssize_t w, y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the integer sample offsets along the motion direction.
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (w=0; w < (ssize_t) width; w++)
  {
    offset[w].x=CastDoubleToLong(ceil((double) (w*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[w].y=CastDoubleToLong(ceil((double) (w*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum *magick_restrict p;
    Quantum *magick_restrict q;
    ssize_t x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double alpha, gamma, pixel;
        PixelChannel channel;
        PixelTrait blur_traits, traits;
        const Quantum *magick_restrict r;
        MagickRealType *magick_restrict k;
        ssize_t j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Channel is not alpha-blended: accumulate raw samples along the
              motion vector.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  /*
                    NOTE(review): this continue skips k++, so later samples
                    read the wrong kernel weight; harmless in practice since
                    status is false and the image is discarded — confirm.
                  */
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha-blended channel: weight each sample by its alpha and
          renormalize by the accumulated alpha mass (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation
applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { RectangleInfo raise; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; raise.width=(size_t) (2*i+2); raise.height=(size_t) (2*i+2); raise.x=(i-1)/2; raise.y=(i-1)/2; (void) RaiseImage(preview_image,&raise,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double) raise.height,(double) raise.x,(double) raise.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, 
threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) 
RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; preview_image->alpha_trait=UndefinedPixelTrait; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
*/ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; size_t n; ssize_t w, y; /* Allocate blur image. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (IsEventLogging() != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (w=0; w < (ssize_t) n; w++) { cos_theta[w]=cos((double) (theta*w-offset)); sin_theta[w]=sin((double) (theta*w-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; const Quantum *magick_restrict r; ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 
1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  size_t
    width;

  ssize_t
    center,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Build a width x width Gaussian weighting kernel.
  */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  {
    ssize_t
      i,
      j,
      v;

    j=(ssize_t) (width-1)/2;
    i=0;
    for (v=(-j); v <= j; v++)
    {
      ssize_t
        u;

      for (u=(-j); u <= j; u++)
        kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
    }
  }
  if (image->debug != MagickFalse)
    {
      /*
        Log the kernel, one row per line, for debugging.
      */
      char
        format[MagickPathExtent],
        *message;

      const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Contrast is measured against a grayscale copy of the image.
  */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* offset (in Quantum units) of the window's center pixel inside the
     (columns+width) x width neighborhood fetched per row */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict l,
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: accumulate only kernel taps whose luminance
              contrast with the center pixel is below the threshold.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* no taps passed the threshold; keep the source pixel */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each selected tap by the kernel and the
          pixel's alpha.
        */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect.  You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation:  Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch a 3-row window (1-pixel border) so each pixel can read its
      8-neighborhood when estimating the surface normal.
    */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray mode: output the shade value itself */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%      Image *SharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated Gaussian; the center tap is then set to
    -2x the (negative) sum so the kernel acts as a sharpening operator.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Normalize the kernel so its taps sum to unity.
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method:  intepolation method.
%
%    o radius:  choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  random_info=AcquireRandomInfoTLS();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* pick a pseudo-random sample point within the width x width square */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoTLS(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens one or more
%  image channels.  We convolve the image with a Gaussian operator of the
%  given radius and standard deviation (sigma).  For reasonable results,
%  radius should be larger than sigma.  Use a radius of 0 and
%  UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double amount,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image: add back a gain-scaled fraction of the difference
    between the original and the blurred clone wherever that difference
    exceeds the threshold.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* pixel = original - blurred; below threshold keep original */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
boson_basis_core.h
#ifndef _BOSON_BASIS_CORE_H #define _BOSON_BASIS_CORE_H #include <complex> #include <cmath> #include "general_basis_core.h" #include "numpy/ndarraytypes.h" #include "openmp.h" namespace basis_general { template<class I> I inline boson_map_bits(I s,const int map[],const I M[],const int sps,const int N){ I ss = 0; for(int i=N-1;i>=0;--i){ int j = map[i]; ss += ( j<0 ? (sps-(int)(s%sps)-1)*M[j+N] : (int)(s%sps)*M[N-j-1] ); s /= sps; } return ss; } template<class I> class boson_basis_core : public general_basis_core<I> { public: std::vector<I> M; const int sps; boson_basis_core(const int _N, const int _sps) : \ general_basis_core<I>::general_basis_core(_N), sps(_sps) { M.resize(_N); M[0] = (I)1; for(int i=1;i<_N;i++){ M[i] = (M[i-1] * (I)_sps); } } boson_basis_core(const int _N, const int _sps,const int _nt, \ const int _maps[], const int _pers[], const int _qs[]) : \ general_basis_core<I>::general_basis_core(_N,_nt,_maps,_pers,_qs), sps(_sps) { M.resize(_N); M[0] = (I)1; for(int i=1;i<_N;i++){ M[i] = (M[i-1] * (I)_sps); } } ~boson_basis_core() {} I map_state(I s,int n_map,int &sign){ if(general_basis_core<I>::nt<=0){ return s; } const int n = general_basis_core<I>::N; return boson_map_bits(s,&general_basis_core<I>::maps[n_map*n],&M[0],sps,n); } void map_state(I s[],npy_intp P,int n_map,signed char sign[]){ if(general_basis_core<I>::nt<=0){ return; } const int n = general_basis_core<I>::N; const int * map = &general_basis_core<I>::maps[n_map*n]; #pragma omp for schedule(static) for(npy_intp i=0;i<P;i++){ s[i] = boson_map_bits(s[i],map,&M[0],sps,n); } } std::vector<int> count_particles(const I r){ std::vector<int> v(1); int n = 0; I s = r; for(int i=0;i<general_basis_core<I>::N;i++){ n += (int)(s%sps); s /= sps; } v[0] = n; return v; } I inline next_state_pcon(const I r){ if(r == 0){ return r; } I s = r; int n=0; for(int i=0;i<general_basis_core<I>::N-1;i++){ int b1 = (int)((s/M[i])%sps); if(b1>0){ n += b1; int b2 = (int)((s/M[i+1])%sps); if(b2<(sps-1)){ n -= 1; s -= 
M[i]; s += M[i+1]; if(n>0){ int l = n/(sps-1); int n_left = n%(sps-1); for(int j=0;j<(i+1);j++){ s -= (int)((s/M[j])%sps) * M[j]; if(j<l){ s += (sps-1)*M[j]; } else if(j == l){ s += n_left*M[j]; } } } break; } } } return s; } int op(I &r,std::complex<double> &me,const int n_op,const char opstr[],const int indx[]){ I s = r; double me_offdiag=1; double me_diag=1; double S = (sps-1.0)/2.0; for(int j=n_op-1;j>-1;j--){ int ind = general_basis_core<I>::N-indx[j]-1; int occ = (int)((r/M[ind])%sps); I b = M[ind]; char op = opstr[j]; switch(op){ case 'z': me_diag *= (occ-S); case 'n': me_diag *= occ; break; case '+': me_offdiag *= (occ+1)%sps; r += ((occ+1)<sps?b:0); break; case '-': me_offdiag *= occ; r -= (occ>0?b:0); break; case 'I': break; default: return -1; } if(std::abs(me_diag*me_offdiag)==0){ r = s; break; } } me *= me_diag*std::sqrt(me_offdiag); return 0; } }; } #endif
simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp=libiomp5 -verify %s

// Parser/Sema diagnostics test for '#pragma omp simd' and its clauses,
// checked with clang's -verify mode: every expected-error/expected-warning/
// expected-note annotation below must match a diagnostic emitted at the line
// given by its relative '@+N' offset, so the line layout here is load-bearing.
// (The two 'xxpected-error' spellings are deliberately disabled checks.)

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd foo

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp simd'}}
#pragma omp simd safelen(4)

void test_no_clause() {
  int i;
#pragma omp simd
  for (i = 0; i < 16; ++i) ;

// expected-error@+2 {{statement after '#pragma omp simd' must be a for loop}}
#pragma omp simd
  ++i;
}

void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

void test_invalid_clause() {
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd foo bar
  for (i = 0; i < 16; ++i) ;
}

void test_non_identifiers() {
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd;
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd firstprivate(x);
  for (i = 0; i < 16; ++i) ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd private(x);
  for (i = 0; i < 16; ++i) ;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp simd' are ignored}}
#pragma omp simd, private(x);
  for (i = 0; i < 16; ++i) ;
}

extern int foo();

void test_safelen() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd safelen()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(, )
  for (i = 0; i < 16; ++i) ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd safelen 4)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4,
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, )
  for (i = 0; i < 16; ++i) ;
// xxpected-error@+1 {{expected expression}}
#pragma omp simd safelen(4)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4 4)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, , 4)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd safelen(4)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp simd safelen(4, 8)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(2.5)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd safelen(foo())
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(-5)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(0)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'safelen' clause must be a positive integer value}}
#pragma omp simd safelen(5 - 5)
  for (i = 0; i < 16; ++i) ;
}

void test_collapse() {
  int i;
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd collapse()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd collapse(, )
  for (i = 0; i < 16; ++i) ;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp simd collapse 4)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4,
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, )
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4 4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, , 4)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
#pragma omp simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp simd collapse(4, 8)
  for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp simd', but found only 1}}
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(2.5)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expression is not an integer constant expression}}
#pragma omp simd collapse(foo())
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(-5)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(0)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument to 'collapse' clause must be a positive integer value}}
#pragma omp simd collapse(5 - 5)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as reduction}}
#pragma omp parallel
#pragma omp simd collapse(2) reduction(+ : i)
  for (i = 0; i < 16; ++i)
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
    for (int j = 0; j < 16; ++j)
// expected-error@+3 {{reduction variable must be shared}}
// expected-error@+2 {{private variable cannot be reduction}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
      for (int k = 0; k < 16; ++k)
        i += j;
}

void test_linear() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(, )
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(int)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected variable name}}
#pragma omp simd linear(0)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd linear(x)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd linear(x, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd linear(x, y, z)
  for (i = 0; i < 16; ++i) ;
  int x, y;
// expected-error@+1 {{expected expression}}
#pragma omp simd linear(x :)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x :, )
  for (i = 0; i < 16; ++i) ;
#pragma omp simd linear(x : 1)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd linear(x : 2 * 2)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd linear(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be linear}}
#pragma omp simd linear(x) linear(x)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as private}}
// expected-error@+1 {{private variable cannot be linear}}
#pragma omp simd private(x) linear(x)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be private}}
#pragma omp simd linear(x) private(x)
  for (i = 0; i < 16; ++i) ;
// expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}}
#pragma omp simd linear(x, y : 0)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as linear}}
// expected-error@+1 {{linear variable cannot be lastprivate}}
#pragma omp simd linear(x) lastprivate(x)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as lastprivate}}
// expected-error@+1 {{lastprivate variable cannot be linear}}
#pragma omp simd lastprivate(x) linear(x)
  for (i = 0; i < 16; ++i) ;
}

void test_aligned() {
  int i;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(, )
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(int)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected variable name}}
#pragma omp simd aligned(0)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp simd aligned(x)
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp simd aligned(x, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp simd aligned(x, y, z)
  for (i = 0; i < 16; ++i) ;
  int *x, y, z[25]; // expected-note 4 {{'y' defined here}}
#pragma omp simd aligned(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(z)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd aligned(x :)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x :, )
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(x : 1)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd aligned(x : 2 * 2)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd aligned(x : 1, y, z : 1)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z)
  for (i = 0; i < 16; ++i) ;
// expected-note@+2 {{defined as aligned}}
// expected-error@+1 {{a variable cannot appear in more than one aligned clause}}
#pragma omp simd aligned(x) aligned(z, x)
  for (i = 0; i < 16; ++i) ;
// expected-note@+3 {{defined as aligned}}
// expected-error@+2 {{a variable cannot appear in more than one aligned clause}}
// expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}}
#pragma omp simd aligned(x, y, z) aligned(y, z)
  for (i = 0; i < 16; ++i) ;
}

void test_private() {
  int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp simd private(
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd private(, )
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd private()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd private(int)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected variable name}}
#pragma omp simd private(0)
  for (i = 0; i < 16; ++i) ;
  int x, y, z;
#pragma omp simd private(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd private(x, y)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}

void test_firstprivate() {
  int i;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{unexpected OpenMP clause 'firstprivate' in directive '#pragma omp simd'}}
// expected-error@+1 {{expected expression}}
#pragma omp simd firstprivate(
  for (i = 0; i < 16; ++i) ;
}

void test_lastprivate() {
  int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 2 {{expected expression}}
#pragma omp simd lastprivate(, )
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate()
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd lastprivate(int)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected variable name}}
#pragma omp simd lastprivate(0)
  for (i = 0; i < 16; ++i) ;
  int x, y, z;
#pragma omp simd lastprivate(x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd lastprivate(x, y)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i) ;
}

void test_reduction() {
  int i, x, y;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction()
  for (i = 0; i < 16; ++i) ;
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(x)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction( : x)
  for (i = 0; i < 16; ++i) ;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected identifier}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(,
  for (i = 0; i < 16; ++i) ;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
// expected-error@+2 {{expected expression}}
// expected-warning@+1 {{missing ':' after reduction identifier - ignoring}}
#pragma omp simd reduction(+
  for (i = 0; i < 16; ++i) ;
// expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}}
//
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+:
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ :, y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected expression}}
#pragma omp simd reduction(+ : x, + : y)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected identifier}}
#pragma omp simd reduction(% : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(+ : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(* : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(- : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(& : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(| : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(^ : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(&& : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(|| : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(max : x)
  for (i = 0; i < 16; ++i) ;
#pragma omp simd reduction(min : x)
  for (i = 0; i < 16; ++i) ;
  struct X {
    int x;
  };
  struct X X;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : X.x)
  for (i = 0; i < 16; ++i) ;
// expected-error@+1 {{expected variable name}}
#pragma omp simd reduction(+ : x + x)
  for (i = 0; i < 16; ++i) ;
}

void test_loop_messages() {
  float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
}
GB_unaryop__lnot_uint8_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any fix belongs in the code generator, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint8_int64
// op(A') function:  GB_tran__lnot_uint8_int64

// C type:   uint8_t
// A type:   int64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = !(aij != 0)

// type of the entries of the input matrix A
#define GB_ATYPE \
    int64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: logical NOT of the "is nonzero" test
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    uint8_t z = (uint8_t) aij ;

// cij = op (cast (aij)) — the fused load/cast/apply used by the loops below
// and by the included transpose template
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (z, aij) ;                       \
    GB_OP (GB_CX (pC), z) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over all anz entries; Cx and Ax may be aliased because
// each iteration reads and writes only position p.
GrB_Info GB_unop__lnot_uint8_int64
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // loop index declared before the pragma — presumably for compilers whose
    // OpenMP support predates declaring the index in the for statement
    // (TODO confirm against the generator template)
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual loop lives in the shared transpose template, which is
    // specialized here by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_fp32_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): any fix belongs in the code generator, not in this file.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_fp32_fc64
// op(A') function:  GB_unop_tran__identity_fp32_fc64

// C type:   float
// A type:   GxB_FC64_t
// cast:     float cij = (float) creal (aij)
// unaryop:  cij = aij

// type of the entries of the input matrix A (double complex)
#define GB_ATYPE \
    GxB_FC64_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting: complex-to-real takes the real part, then narrows to float
#define GB_CAST(z, aij) \
    float z = (float) creal (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GxB_FC64_t aij = Ax [pA] ;                  \
    /* Cx [pC] = op (cast (aij)) */             \
    float z = (float) creal (aij) ;             \
    Cx [pC] = z ;                               \
}

// true if operator is the identity op with no typecasting
// (0 here: fc64 -> fp32 requires a cast, so the memcpy fast path below
// is compile-time dead for this kernel)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__identity_fp32_fc64
(
    float *Cx,              // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse/hyper/full: every position 0..anz-1 holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            float z = (float) creal (aij) ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions flagged in Ab are converted
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            float z = (float) creal (aij) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__identity_fp32_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop lives in the shared transpose template, specialized by the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test_encap_decap.c
/* Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /* Encapsulate a secret and use the secret to encrypt a message Decapsulate the secret and use the secret to decrypt the encrypted message */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <amcl/utils.h> #include <amcl/randapi.h> #include <amcl/bls_BLS381.h> #include <oqs/oqs.h> #include <amcl/pqnist.h> #define NTHREADS 8 #define MAXSIZE 256 #define G2LEN 4*BFS_BLS381 int main() { int i,rc; // Seed value for CSPRNG char seed[PQNIST_SEED_LENGTH]; octet SEED = {sizeof(seed),sizeof(seed),seed}; // Seed value for key generation char seedkeys[NTHREADS][PQNIST_SEED_LENGTH]; csprng RNG; // Initialization vector char iv[PQNIST_AES_IV_LENGTH]; octet IV= {sizeof(iv),sizeof(iv),iv}; // Message to be sent to Bob char p[NTHREADS][MAXSIZE]; octet P[NTHREADS]; // AES CBC ciphertext char c[NTHREADS][MAXSIZE]; octet C[NTHREADS]; // non random seed value for (i=0; i<32; i++) SEED.val[i]=i+1; printf("SEED: "); OCT_output(&SEED); printf("\n"); // initialise random number generator CREATE_CSPRNG(&RNG,&SEED); // Initialise key generation seed for(i=0; i<NTHREADS; i++) { for(int j=0; j<PQNIST_SEED_LENGTH; j++) { seedkeys[i][j] = i; } } // Bob's SIKE keys uint8_t SIKEpk[NTHREADS][OQS_KEM_sike_p751_length_public_key]; uint8_t 
SIKEsk[NTHREADS][OQS_KEM_sike_p751_length_secret_key]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { rc = pqnist_sike_keys(seedkeys[i], SIKEpk[i], SIKEsk[i]); if (rc) { fprintf(stderr, "FAILURE pqnist_keys rc: %d\n", rc); OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); exit(EXIT_FAILURE); } int j = OQS_KEM_sike_p751_length_public_key; printf("Bob SIKE pklen %d pk: ", j); amcl_print_hex(SIKEpk[i], j); j = OQS_KEM_sike_p751_length_secret_key; printf("Bob SIKE sklen %d sk: ", j); amcl_print_hex(SIKEsk[i], j); } // Alice for(i=0; i<NTHREADS; i++) { bzero(p[i],sizeof(p[i])); P[i].max = MAXSIZE; P[i].len = sprintf(p[i], "Hello Bob! This is a message from Alice %d", i); P[i].val = p[i]; // Pad message int l = 16 - (P[i].len % 16); if (l < 16) { OCT_jbyte(&P[i],0,l); } } // Random initialization value generateRandom(&RNG,&IV); printf("Alice IV: "); OCT_output(&IV); // Copy plaintext for(i=0; i<NTHREADS; i++) { C[i].val = c[i]; C[i].max = MAXSIZE; OCT_copy(&C[i],&P[i]); printf("Alice Plaintext: "); OCT_output_string(&C[i]); printf("\n"); } // SIKE encapsulated key uint8_t ek[NTHREADS][OQS_KEM_sike_p751_length_ciphertext]; #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Generate an AES which is ecapsulated using SIKE. Use this key to // AES encrypt the K parameter. 
rc = pqnist_encapsulate_encrypt(C[i].val, C[i].len, IV.val, SIKEpk[i], ek[i]); if(rc) { fprintf(stderr, "FAILURE pqnist_encapsulate_encrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Alice ciphertext: "); OCT_output(&C[i]); printf("Alice ek %lu ek: ", sizeof(ek[i])); amcl_print_hex(ek[i], sizeof(ek[i])); printf("\n"); } // Bob #pragma omp parallel for for(i=0; i<NTHREADS; i++) { // Obtain encapsulated AES key and decrypt C rc = pqnist_decapsulate_decrypt(C[i].val, C[i].len, IV.val, SIKEsk[i], ek[i]); if(rc) { fprintf(stderr, "FAILURE pqnist_decapsulate_decrypt rc: %d\n", rc); exit(EXIT_FAILURE); } printf("Bob Plaintext: "); OCT_output(&C[i]); printf("Bob Plaintext: "); OCT_output_string(&C[i]); printf("\n"); // Compare sent and recieved message (returns 0 for failure) rc = OCT_comp(&P[i],&C[i]); if(!rc) { fprintf(stderr, "FAILURE OCT_comp rc: %d\n", rc); exit(EXIT_FAILURE); } } // clear memory OCT_clear(&IV); for(i=0; i<NTHREADS; i++) { OQS_MEM_cleanse(SIKEsk[i], OQS_KEM_sike_p751_length_secret_key); OCT_clear(&P[i]); OCT_clear(&C[i]); } KILL_CSPRNG(&RNG); printf("SUCCESS\n"); exit(EXIT_SUCCESS); }
GetEnvironInfo.c
/******************************************************************************
 * FILE: omp_getEnvInfo.c
 * DESCRIPTION:
 *   OpenMP Example - Get Environment Information - C/C++ Version
 *   The master thread queries and prints selected environment information.
 * AUTHOR: Blaise Barney 7/06
 * LAST REVISED: 05/18/16
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

int main (int argc, char *argv[])
{
  /* Per-thread values (listed in the private clause below). */
  int num_threads, thread_id;
  /* Runtime-environment queries; written only by the master thread. */
  int num_procs, max_threads, in_par, dyn_on, nested_on;

  /* Spawn the thread team. */
  #pragma omp parallel private(num_threads, thread_id)
  {
    thread_id = omp_get_thread_num();

    /* The master thread (id 0) performs all queries and printing. */
    if (thread_id == 0)
    {
      printf("Thread %d getting environment info...\n", thread_id);

      /* Query the OpenMP runtime. */
      num_procs   = omp_get_num_procs();
      num_threads = omp_get_num_threads();
      max_threads = omp_get_max_threads();
      in_par      = omp_in_parallel();
      dyn_on      = omp_get_dynamic();
      nested_on   = omp_get_nested();

      /* Report the results. */
      printf("Number of processors = %d\n", num_procs);
      printf("Number of threads = %d\n", num_threads);
      printf("Max threads = %d\n", max_threads);
      printf("In parallel? = %d\n", in_par);
      printf("Dynamic threads enabled? = %d\n", dyn_on);
      printf("Nested parallelism enabled? = %d\n", nested_on);
    }
  } /* end of parallel region */

  return 0;
}
GB_subassign_09.c
//------------------------------------------------------------------------------
// GB_subassign_09: C(I,J)<M,repl> = scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 09: C(I,J)<M,repl> = scalar ; using S

// M: present
// Mask_comp: false
// C_replace: true
// accum: NULL
// A: scalar
// S: constructed

// C: not bitmap or full

#include "GB_unused.h"
#include "GB_subassign_methods.h"

GrB_Info GB_subassign_09
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,             // row index list, or a colon expression
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list, or a colon expression
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,         // NOTE(review): presumably consumed by
                                    // GB_GET_MASK below -- confirm in
                                    // GB_subassign_methods.h
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    // NOTE(review): GB_EMPTY_TASKLIST appears to declare the task list and
    // the scratch variables (ntasks, nthreads, taskid, nzombies, ...) used by
    // the slicing and wrapup macros below -- verify in GB_subassign_methods.h.
    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GB_GET_S ;
    GrB_BinaryOp accum = NULL ;     // Method 09 has no accumulator (see above)

    //--------------------------------------------------------------------------
    // Method 09: C(I,J)<M,repl> = scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in M+S must be examined.  All entries in S
    // are modified:  if M(i,j)=1 then S(i,j) is used to write to the
    // corresponding entry in C.  If M(i,j) is not present, or zero, then the
    // entry in C is cleared (because of C_replace).  If S(i,j) is not present,
    // and M(i,j)=1, then the scalar is inserted into C.  The only case that
    // can be skipped is if neither S nor M is present.  As a result, this
    // method need not traverse all of IxJ.  It can limit its traversal to the
    // pattern of M+S.

    // Method 09 and Method 11 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (M_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all M+S
        GB_SUBASSIGN_TWO_SLICE (M, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // Phase 1 only deletes (zombies) or overwrites entries already present in
    // C; brand-new entries are merely counted here (task_pending) and inserted
    // in phase 2 as pending tuples.

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (Sfound && !mij)
                    {
                        // S (i,j) is present but M (i,j) is false
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    else if (Sfound && mij)
                    {
                        // S (i,j) present and M (i,j) is true
                        GB_C_S_LOOKUP ;
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;
                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_C_S_LOOKUP ;
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list S (:,j) has entries.  List M (:,j) exhausted.
                while (pS < pS_end)
                {
                    // S (i,j) is present but M (i,j) is not
                    // ----[C A 0] or [X A 0]-----------------------------------
                    // [X A 0]: action: ( X ): still a zombie
                    // [C A 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (M_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: M is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iM_start, iM_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iM_start:iM_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iM_start) ;
                int64_t pM_start = j * Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iM_start:iM_end,j) and M(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // NOTE(review): jC and iC look unused here but are presumably
                // read by the GB_PENDING_INSERT macro -- verify.
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iM = iM_start ; iM < iM_end ; iM++)
                {
                    int64_t pM = pM_start + iM ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iM) ;
                    bool mij = Mb [pM] && GB_mcast (Mx, pM, msize) ;
                    if (!Sfound && mij)
                    {
                        // S (i,j) is not present, M (i,j) is true
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: M is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get S(:,j) and M(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X, Mvlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and M(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and M (:,j) have entries
                while (pS < pS_end && pM < pM_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iM = GBI (Mi, pM, Mvlen) ;
                    if (iS < iM)
                    {
                        // S (i,j) is present but M (i,j) is not
                        GB_NEXT (S) ;
                    }
                    else if (iM < iS)
                    {
                        // S (i,j) is not present, M (i,j) is present
                        if (GB_mcast (Mx, pM, msize))
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                            GB_PENDING_INSERT (scalar) ;
                        }
                        GB_NEXT (M) ;
                    }
                    else
                    {
                        // both S (i,j) and M (i,j) present
                        GB_NEXT (S) ;
                        GB_NEXT (M) ;
                    }
                }

                // while list M (:,j) has entries.  List S (:,j) exhausted.
                while (pM < pM_end)
                {
                    // S (i,j) is not present, M (i,j) is present
                    if (GB_mcast (Mx, pM, msize))
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iM = GBI (Mi, pM, Mvlen) ;
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    GB_NEXT (M) ;
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
math_libmextras.c
#include <stdio.h> #include <math.h> #include <omp_libmextras.h> #define FUNC(x) sinpi(x) #define TYPE double #define EPS 1e-5 void vmul(TYPE*a, TYPE*b, TYPE*c, int N){ #pragma omp target map(to: a[0:N],b[0:N]) map(from:c[0:N]) #pragma omp teams distribute parallel for for(int i=0; i<N; i++) { c[i]=FUNC(a[i])*FUNC(b[i]); } } int main(){ const int N = 100; TYPE a[N], b[N], c[N], validate[N]; TYPE delta, delta_max=0; int flag=-1, flag_max=-1; // Mark Success for(int i=0; i<N; i++) { a[i]=i+1; b[i]=i+2; validate[i]=FUNC(a[i])*FUNC(b[i]); } vmul(a,b,c,N); for(int i=0; i<N; i++) { delta=fabs(c[i]-validate[i]); if(delta > EPS) { // print 1st bad index if(flag == -1) printf("First fail: c[%d](%g) != validate[%d](%g) <%g>\n", i,c[i],i,validate[i],c[i]-validate[i]); if(delta > delta_max) { delta_max=delta; flag_max=flag; } flag = i; } } if(flag == -1){ printf("Success\n"); return 0; } else { printf("Last fail: c[%d](%g) != validate[%d](%g) <%g>\n", flag,c[flag],flag,validate[flag],c[flag]-validate[flag]); printf("Max fail: c[%d](%g) != validate[%d](%g) <%g>\n", flag_max, c[flag_max],flag_max,validate[flag_max],c[flag_max]-validate[flag_max]); printf("Fail\n"); return 1; } }
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
kmeans_clustering.c
/*****************************************************************************/ /*IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. */ /*By downloading, copying, installing or using the software you agree */ /*to this license. If you do not agree to this license, do not download, */ /*install, copy or use the software. */ /* */ /* */ /*Copyright (c) 2005 Northwestern University */ /*All rights reserved. */ /*Redistribution of the software in source and binary forms, */ /*with or without modification, is permitted provided that the */ /*following conditions are met: */ /* */ /*1 Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* */ /*2 Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in the */ /* documentation and/or other materials provided with the distribution.*/ /* */ /*3 Neither the name of Northwestern University nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* */ /*THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS */ /*IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED */ /*TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT AND */ /*FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL */ /*NORTHWESTERN UNIVERSITY OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, */ /*INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /*(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR */ /*SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) */ /*HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, */ /*STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /*ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /*POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************************/ /*************************************************************************/ /** File: kmeans_clustering.c **/ /** Description: Implementation of regular k-means clustering **/ /** algorithm **/ /** Author: Wei-keng Liao **/ /** ECE Department, Northwestern University **/ /** email: wkliao@ece.northwestern.edu **/ /** **/ /** Edited by: Jay Pisharath **/ /** Northwestern University. **/ /** **/ /** ================================================================ **/ /** * **/ /** Edited by: Sang-Ha Lee * **/ /** University of Virginia * **/ /** * **/ /** Description: No longer supports fuzzy c-means clustering; * **/ /** only regular k-means clustering. * **/ /** Simplified for main functionality: regular * k-means **/ /** clustering. 
* **/ /** **/ /*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "kmeans.h" #include <omp.h> #define RANDOM_MAX 2147483647 #ifndef FLT_MAX #define FLT_MAX 3.40282347e+38 #endif extern double wtime(void); int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i = 0; i < npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return (index); } /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans = 0.0; for (i = 0; i < numdims; i++) ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]); return (ans); } /*----< kmeans_clustering() >---------------------------------------------*/ float **kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, k, n = 0, index, loop = 0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float **new_centers; /* [nclusters][nfeatures] */ float **clusters; /* out: [nclusters][nfeatures] */ float delta; double timing; int nthreads; int **partial_new_centers_len; float ***partial_new_centers; nthreads = omp_get_max_threads(); /* allocate space for returning variable clusters[] */ clusters = (float **)malloc(nclusters * sizeof(float *)); clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float)); for (i = 1; i < nclusters; i++) clusters[i] = clusters[i - 1] + nfeatures; /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { // n = (int)rand() % npoints; for (j = 0; j < nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } for (i = 0; i < npoints; i++) membership[i] = -1; /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int *)calloc(nclusters, sizeof(int)); new_centers = (float **)malloc(nclusters * sizeof(float *)); new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float)); for (i = 1; i < nclusters; i++) new_centers[i] = new_centers[i - 1] + nfeatures; partial_new_centers_len = (int **)malloc(nthreads * sizeof(int *)); partial_new_centers_len[0] = (int *)calloc(nthreads * nclusters, sizeof(int)); for (i = 1; i < nthreads; i++) partial_new_centers_len[i] = partial_new_centers_len[i - 1] + nclusters; partial_new_centers = (float ***)malloc(nthreads * sizeof(float **)); partial_new_centers[0] = (float **)malloc(nthreads * nclusters * sizeof(float *)); for (i = 1; i < nthreads; i++) partial_new_centers[i] = partial_new_centers[i - 1] + nclusters; for (i = 0; i < nthreads; i++) { for (j = 0; j < nclusters; j++) partial_new_centers[i][j] = (float *)calloc(nfeatures, sizeof(float)); } do { delta = 0.0; #pragma omp parallel shared(feature, clusters, membership, \ partial_new_centers, partial_new_centers_len) { int tid = omp_get_thread_num(); #pragma omp for private(i, j, index) firstprivate( \ npoints, nclusters, nfeatures) schedule(static) reduction(+ : 
delta) for (i = 0; i < npoints; i++) { /* find the index of nestest cluster centers */ index = find_nearest_point(feature[i], nfeatures, clusters, nclusters); /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of all objects located within */ partial_new_centers_len[tid][index]++; for (j = 0; j < nfeatures; j++) partial_new_centers[tid][index][j] += feature[i][j]; } } /* let the main thread perform the array reduction */ for (i = 0; i < nclusters; i++) { for (j = 0; j < nthreads; j++) { new_centers_len[i] += partial_new_centers_len[j][i]; partial_new_centers_len[j][i] = 0.0; for (k = 0; k < nfeatures; k++) { new_centers[i][k] += partial_new_centers[j][i][k]; partial_new_centers[j][i][k] = 0.0; } } } /* replace old cluster centers with new_centers */ for (i = 0; i < nclusters; i++) { for (j = 0; j < nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } } while (delta > threshold && loop++ < 500); free(new_centers[0]); free(new_centers); free(new_centers_len); return clusters; }
GB_subassign_11.c
//------------------------------------------------------------------------------
// GB_subassign_11: C(I,J)<M,repl> += scalar ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Method 11: C(I,J)<M,repl> += scalar ; using S

// M: present
// Mask_comp: false
// C_replace: true
// accum: present
// A: scalar
// S: constructed

// NOTE(review): GB_FREE_WORK is presumably invoked by the error/wrapup macros
// to release the two-slice task workspace -- verify in GB_subassign_methods.h.
#define GB_FREE_WORK GB_FREE_TWO_SLICE

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_11
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,             // row index list, or a colon expression
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,             // column index list, or a colon expression
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const GrB_BinaryOp accum,       // Method 11 applies an accumulator
    const void *scalar,
    const GrB_Type atype,
    const GrB_Matrix S,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_GET_C ;
    // GB_GET_MASK ;
    const int64_t *restrict Mp = M->p ;
    // const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    const GB_void *restrict Mx = M->x ;
    const size_t msize = M->type->size ;
    // cast_M converts one entry of M (of any built-in type) to bool
    GB_cast_function cast_M = GB_cast_factory (GB_BOOL_code, M->type->code) ;
    GB_GET_ACCUM_SCALAR ;
    GB_GET_S ;

    //--------------------------------------------------------------------------
    // Method 11: C(I,J)<M,repl> += scalar ; using S
    //--------------------------------------------------------------------------

    // Time: Optimal.  All entries in M+S must be examined.  All entries in S
    // are modified:  if M(i,j)=1 then S(i,j) is used to write to the
    // corresponding entry in C.  If M(i,j) is not present, or zero, then the
    // entry in C is cleared (because of C_replace).  If S(i,j) is not present,
    // and M(i,j)=1, then the scalar is inserted into C.  The only case that
    // can be skipped is if neither S nor M is present.  As a result, this
    // method need not traverse all of IxJ.  It can limit its traversal to the
    // pattern of M+S.

    // Method 09 and Method 11 are very similar.

    //--------------------------------------------------------------------------
    // Parallel: Z=M+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_TWO_SLICE (M, S) ;

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // Phase 1 only deletes (zombies) or updates existing entries of C; new
    // entries are counted (task_pending) and inserted in phase 2.

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get S(:,j) and M(:,j)
            //------------------------------------------------------------------

            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and M(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and M (:,j) have entries
            while (pS < pS_end && pM < pM_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iM = Mi [pM] ;

                if (iS < iM)
                {
                    // S (i,j) is present but M (i,j) is not
                    // ----[C A 0] or [X A 0]-----------------------------------
                    // [X A 0]: action: ( X ): still a zombie
                    // [C A 0]: C_repl: action: ( delete ): becomes zombie
                    GB_C_S_LOOKUP ;
                    GB_DELETE_ENTRY ;
                    GB_NEXT (S) ;
                }
                else if (iM < iS)
                {
                    // S (i,j) is not present, M (i,j) is present
                    bool mij ;
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (M) ;
                }
                else
                {
                    // both S (i,j) and M (i,j) present
                    bool mij ;
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    GB_C_S_LOOKUP ;
                    if (mij)
                    {
                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =C+A ): apply accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_withaccum_C_A_1_scalar ;
                    }
                    else
                    {
                        // ----[C A 0] or [X A 0]-------------------------------
                        // [X A 0]: action: ( X ): still a zombie
                        // [C A 0]: C_repl: action: ( delete ): becomes zombie
                        GB_DELETE_ENTRY ;
                    }
                    GB_NEXT (S) ;
                    GB_NEXT (M) ;
                }
            }

            // while list S (:,j) has entries.  List M (:,j) exhausted
            while (pS < pS_end)
            {
                // S (i,j) is present but M (i,j) is not
                // ----[C A 0] or [X A 0]-----------------------------------
                // [X A 0]: action: ( X ): still a zombie
                // [C A 0]: C_repl: action: ( delete ): becomes zombie
                GB_C_S_LOOKUP ;
                GB_DELETE_ENTRY ;
                GB_NEXT (S) ;
            }

            // while list M (:,j) has entries.  List S (:,j) exhausted
            while (pM < pM_end)
            {
                // S (i,j) is not present, M (i,j) is present
                // mij = (bool) M [pM]
                bool mij ;
                cast_M (&mij, Mx +(pM*msize), 0) ;
                if (mij)
                {
                    // ----[. A 1]------------------------------------------
                    // [. A 1]: action: ( insert )
                    task_pending++ ;
                }
                GB_NEXT (M) ;
            }
        }
        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get S(:,j) and M(:,j)
            //------------------------------------------------------------------

            int64_t j = (Zh == NULL) ? k : Zh [k] ;
            GB_GET_MAPPED_VECTOR (pM, pM_end, pA, pA_end, Mp, j, k, Z_to_X) ;
            GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;

            //------------------------------------------------------------------
            // do a 2-way merge of S(:,j) and M(:,j)
            //------------------------------------------------------------------

            // jC = J [j] ; or J is a colon expression
            // NOTE(review): jC and iC look unused here but are presumably read
            // by the GB_PENDING_INSERT macro -- verify.
            int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

            // while both list S (:,j) and M (:,j) have entries
            while (pS < pS_end && pM < pM_end)
            {
                int64_t iS = Si [pS] ;
                int64_t iM = Mi [pM] ;

                if (iS < iM)
                {
                    // S (i,j) is present but M (i,j) is not
                    GB_NEXT (S) ;
                }
                else if (iM < iS)
                {
                    // S (i,j) is not present, M (i,j) is present
                    bool mij ;
                    cast_M (&mij, Mx +(pM*msize), 0) ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                        GB_PENDING_INSERT (scalar) ;
                    }
                    GB_NEXT (M) ;
                }
                else
                {
                    // both S (i,j) and M (i,j) present
                    GB_NEXT (S) ;
                    GB_NEXT (M) ;
                }
            }

            // while list M (:,j) has entries.  List S (:,j) exhausted
            while (pM < pM_end)
            {
                // S (i,j) is not present, M (i,j) is present
                // mij = (bool) M [pM]
                bool mij ;
                cast_M (&mij, Mx +(pM*msize), 0) ;
                if (mij)
                {
                    // ----[. A 1]------------------------------------------
                    // [. A 1]: action: ( insert )
                    int64_t iM = Mi [pM] ;
                    int64_t iC = GB_ijlist (I, iM, Ikind, Icolon) ;
                    GB_PENDING_INSERT (scalar) ;
                }
                GB_NEXT (M) ;
            }
        }
        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
ast-dump-openmp-for.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s

void test_one(int x) {
#pragma omp for
  for (int i = 0; i < x; i++)
    ;
}

void test_two(int x, int y) {
#pragma omp for
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_three(int x, int y) {
#pragma omp for collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_four(int x, int y) {
#pragma omp for collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}

void test_five(int x, int y, int z) {
#pragma omp for collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-for.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:4:1, col:16>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:10:1, col:16>
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:17:1, col:28>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 'int' 1
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPForDirective {{.*}} <line:24:1, col:28>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:26> 'int' 2
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPForDirective {{.*}} <line:31:1, col:28>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:17, col:27>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:26> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:26> 'int' 2
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-for.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:5> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'

// NOTE (maintenance): the CHECK-NEXT expectations above are an auto-generated AST
// dump and encode exact <line:N, col:M> positions of the code section at the top
// of this file. Do NOT insert, remove, or reflow any line in the code section
// without regenerating the expectations (gen_ast_dump_json_test / re-running
// clang -ast-dump and updating by hand); even an added comment line shifts every
// recorded line number and breaks the test.
par_csr_matvec.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matvec functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec
 *--------------------------------------------------------------------------*/

// y = alpha*A*x + beta*b
/* Distributed sparse matrix-vector product, out of place.
 *
 * Strategy: pack the locally-owned entries of x that remote ranks need,
 * start a nonblocking halo exchange, multiply by the local (diag) part of A
 * while the exchange is in flight, wait, then accumulate the off-diagonal
 * (offd) part applied to the received ghost values x_tmp.
 *
 * Returns 0 on success; 11/12/13 are informational size-mismatch codes
 * (see the comment block inside) and do NOT abort the computation. */
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex       alpha,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector    *x,
                                    HYPRE_Complex       beta,
                                    hypre_ParVector    *b,
                                    hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *x_tmp;   /* receive buffer for ghost entries of x */

   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);

   HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;

   HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
   HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );

   HYPRE_Complex *x_tmp_data, **x_buf_data;   /* x_buf_data: per-vector send buffers */
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);

#if defined(HYPRE_USING_GPU)
   /* temporarily run asynchronously on the compute stream; restored at exit */
   HYPRE_Int sync_stream;
   hypre_GetSyncCudaCompute(&sync_stream);
   hypre_SetSyncCudaCompute(0);
#endif

   HYPRE_ANNOTATE_FUNC_BEGIN;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/

   hypre_assert( idxstride > 0 );

   if (num_cols != x_size)
   {
      ierr = 11;
   }

   if (num_rows != y_size || num_rows != b_size)
   {
      ierr = 12;
   }

   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
   {
      ierr = 13;
   }

   hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );

   if ( num_vectors == 1 )
   {
      x_tmp = hypre_SeqVectorCreate( num_cols_offd );
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
   }

   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg,
                                                                  hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   HYPRE_Int use_persistent_comm = 0;

#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif

   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* job 1 == forward (x -> ghost) exchange pattern */
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
   }
   else
   {
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }

   /* x_tmp */
#if defined(HYPRE_USING_GPU)
   /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
#if 1
         hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                             HYPRE_MEMORY_DEVICE);
#else
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd,
                                                              hypre_MEMORY_DEVICE);
#endif
      }
      hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* receive directly into the persistent handle's buffer (no copy) */
      hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(
                                   persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
   }
#endif

   hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
   x_tmp_data = hypre_VectorData(x_tmp);

   /* x_buff_data */
   x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);

   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_GPU)
      if (jv == 0)
      {
         /* reuse the comm_pkg's persistent device send buffer for vector 0 */
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
#if 1
            hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                HYPRE_MEMORY_DEVICE);
#else
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
#endif
         }
         x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
         continue;
#endif
      }

      x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }

   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more
      generally, at least for 'row' storage. This in turn, means either
      change CommPkg so num_sends is no.zones*no.vectors (not no.zones)
      or, less dangerously, put a stride in the logic of
      CommHandleCreate (stride either from a new arg or a new variable
      inside CommPkg).  Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */
   hypre_assert( idxstride == 1 );

   //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);

   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);

   /* pack: gather the owned x entries listed in send_map_elmts into the send buffers */
   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
      HYPRE_Complex *locl_data = x_local_data + jv * vecstride;

      /* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* pack send data on device */
      HYPRE_THRUST_CALL( gather,
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
                         hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                         locl_data,
                         send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      /* pack send data on device */
      HYPRE_Int i;
      HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
      HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
      for (i = start; i < end; i++)
      {
         send_data[i] = locl_data[device_send_map_elmts[i]];
      }
#else
      HYPRE_Int i;
      /* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
      #pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)];
      }
#endif
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication starts */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE,
                                            x_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         /* job 1: send owned entries, receive ghost entries into x_tmp */
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg,
                                                            HYPRE_MEMORY_DEVICE, x_buf_data[jv],
                                                            HYPRE_MEMORY_DEVICE, &x_tmp_data[jv * num_cols_offd] );
      }
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* overlapped local computation */
   hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif

   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif

   /* computation offd part: accumulate (note beta == 1.0 here, y already holds
      the diag contribution) */
   if (num_cols_offd)
   {
      hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
   }

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif

   hypre_SeqVectorDestroy(x_tmp);
   x_tmp = NULL;

   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* jv == 0 aliases the comm_pkg's persistent buffer; do not free it here */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
   }

#if defined(HYPRE_USING_GPU)
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif

   HYPRE_ANNOTATE_FUNC_END;

   return ierr;
}

/* y = alpha*A*x + beta*y : in-place convenience wrapper (b aliases y) */
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex       alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector    *x,
                          HYPRE_Complex       beta,
                          hypre_ParVector    *y )
{
   return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecT
 *
 * Performs y <- alpha * A^T * x + beta * y
 *
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex       alpha,
                           hypre_ParCSRMatrix *A,
                           hypre_ParVector    *x,
                           HYPRE_Complex       beta,
                           hypre_ParVector    *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);   /* optional stored transposes */
   hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *y_tmp;

   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);

   HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;

   HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
HYPRE_Int idxstride = hypre_VectorIndexStride(y_local); HYPRE_Complex *y_tmp_data, **y_buf_data; HYPRE_Complex *y_local_data = hypre_VectorData(y_local); #if defined(HYPRE_USING_GPU) HYPRE_Int sync_stream; hypre_GetSyncCudaCompute(&sync_stream); hypre_SetSyncCudaCompute(0); #endif HYPRE_ANNOTATE_FUNC_BEGIN; /*--------------------------------------------------------------------- * Check for size compatibility. MatvecT returns ierr = 1 if * length of X doesn't equal the number of rows of A, * ierr = 2 if the length of Y doesn't equal the number of * columns of A, and ierr = 3 if both are true. * * Because temporary vectors are often used in MatvecT, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ if (num_rows != x_size) { ierr = 1; } if (num_cols != y_size) { ierr = 2; } if (num_rows != x_size && num_cols != y_size) { ierr = 3; } hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors ); hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors ); if ( num_vectors == 1 ) { y_tmp = hypre_SeqVectorCreate(num_cols_offd); } else { hypre_assert( num_vectors > 1 ); y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) ); hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif HYPRE_Int use_persistent_comm = 0; #ifdef 
HYPRE_USING_PERSISTENT_COMM use_persistent_comm = num_vectors == 1; // JSP TODO: we can use persistent communication for multi-vectors, // but then we need different communication handles for different // num_vectors. hypre_ParCSRPersistentCommHandle *persistent_comm_handle; #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg); #endif } else { comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST); } /* y_tmp */ #if defined(HYPRE_USING_GPU) /* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */ if (num_vectors == 1) { if (!hypre_ParCSRCommPkgTmpData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE); #endif } hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg); hypre_SeqVectorSetDataOwner(y_tmp, 0); } #else if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer( persistent_comm_handle); hypre_SeqVectorSetDataOwner(y_tmp, 0); #endif } #endif hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE); y_tmp_data = hypre_VectorData(y_tmp); /* y_buf_data */ y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST); for (jv = 0; jv < num_vectors; ++jv) { #if defined(HYPRE_USING_GPU) if (jv == 0) { if (!hypre_ParCSRCommPkgBufData(comm_pkg)) { #if 1 hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); #else hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_MEMORY_DEVICE); #endif } y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg); continue; } #endif if 
(use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle); continue; #endif } y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif if (num_cols_offd) { if (offdT) { // offdT is optional. Used only if it's present hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp); } else { hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { /* this is where we assume multivectors are 'column' storage */ comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv * num_cols_offd], HYPRE_MEMORY_DEVICE, y_buf_data[jv] ); } } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); #endif /* overlapped local computation */ if (diagT) { // diagT is optional. Used only if it's present. 
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local); } else { hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime(); #endif /* nonblocking communication ends */ if (use_persistent_comm) { #ifdef HYPRE_USING_PERSISTENT_COMM hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]); #endif } else { for ( jv = 0; jv < num_vectors; ++jv ) { hypre_ParCSRCommHandleDestroy(comm_handle[jv]); comm_handle[jv] = NULL; } hypre_TFree(comm_handle, HYPRE_MEMORY_HOST); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime(); hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime(); #endif /* The assert is because the following loop only works for 'column' storage of a multivector. This needs to be fixed to work more generally, at least for 'row' storage. This in turn, means either change CommPkg so num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put a stride in the logic of CommHandleCreate (stride either from a new arg or a new variable inside CommPkg). Or put the num_vector iteration inside CommHandleCreate (perhaps a new multivector variant of it). 
*/ hypre_assert( idxstride == 1 ); /* send_map_elmts on device */ hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg); for (jv = 0; jv < num_vectors; ++jv) { HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv]; HYPRE_Complex *locl_data = y_local_data + jv * vecstride; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /* unpack recv data on device */ if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg)) { hypre_ParCSRCommPkgWorkSpace(comm_pkg) = hypre_TAlloc( char, (2 * sizeof(HYPRE_Int) + sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_DEVICE ); } hypreDevice_GenScatterAdd(locl_data, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg), recv_data, hypre_ParCSRCommPkgWorkSpace(comm_pkg)); #elif defined(HYPRE_USING_DEVICE_OPENMP) HYPRE_Int i, j; /* unpack recv data on device */ for (i = 0; i < num_sends; i++) { HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg); HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); #pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts) for (j = start; j < end; j++) { locl_data[device_send_map_elmts[j]] += recv_data[j]; } } #else HYPRE_Int i; /* unpack recv data on host, TODO OMP? 
*/ for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); i ++) { locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)] += recv_data[i]; } #endif } hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL; if (!use_persistent_comm) { for ( jv = 0; jv < num_vectors; ++jv ) { #if defined(HYPRE_USING_GPU) if (jv == 0) { continue; } #endif hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE); } hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST); } #if defined(HYPRE_USING_GPU) hypre_SetSyncCudaCompute(sync_stream); hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime(); #endif HYPRE_ANNOTATE_FUNC_END; return ierr; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixMatvec_FF *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, hypre_ParVector *x, HYPRE_Complex beta, hypre_ParVector *y, HYPRE_Int *CF_marker, HYPRE_Int fpt ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommHandle *comm_handle; hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); hypre_Vector *y_local = hypre_ParVectorLocalVector(y); HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A); HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A); hypre_Vector *x_tmp; HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x); HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd); HYPRE_Int ierr = 0; HYPRE_Int num_sends, i, j, index, start, num_procs; HYPRE_Int *int_buf_data = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Complex *x_tmp_data = NULL; HYPRE_Complex *x_buf_data = NULL; HYPRE_Complex *x_local_data = 
hypre_VectorData(x_local); /*--------------------------------------------------------------------- * Check for size compatibility. ParMatvec returns ierr = 11 if * length of X doesn't equal the number of columns of A, * ierr = 12 if the length of Y doesn't equal the number of rows * of A, and ierr = 13 if both are true. * * Because temporary vectors are often used in ParMatvec, none of * these conditions terminates processing, and the ierr flag * is informational only. *--------------------------------------------------------------------*/ hypre_MPI_Comm_size(comm, &num_procs); if (num_cols != x_size) { ierr = 11; } if (num_rows != y_size) { ierr = 12; } if (num_cols != x_size && num_rows != y_size) { ierr = 13; } if (num_procs > 1) { if (num_cols_offd) { x_tmp = hypre_SeqVectorCreate( num_cols_offd ); hypre_SeqVectorInitialize(x_tmp); x_tmp_data = hypre_VectorData(x_tmp); } /*--------------------------------------------------------------------- * If there exists no CommPkg for A, a CommPkg is generated using * equally load balanced partitionings *--------------------------------------------------------------------*/ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); if (num_sends) x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) x_buf_data[index++] = x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data ); } hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker, CF_marker, fpt); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_sends) int_buf_data = hypre_CTAlloc(HYPRE_Int, 
hypre_ParCSRCommPkgSendMapStart (comm_pkg, num_sends), HYPRE_MEMORY_HOST); if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd ); hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local, CF_marker, CF_marker_offd, fpt); hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL; hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); } return ierr; }
starwars.c
/* Star Wars cellular automaton (rule 245/2/4)
 *   $ cc -O3 -fopenmp -o starwars starwars.c
 *   $ ./starwars | mpv --no-correct-pts --fps=15 --fs -
 *   $ ./starwars | x264 --frames=900 --fps=15 -o starwars.mp4 /dev/stdin
 * Ref: https://www.conwaylife.com/wiki/OCA:Star_Wars
 */
#include <time.h>
#include <stdio.h>

#define S 2            /* pixel scale factor */
#define W (1920/S)     /* grid width  in cells */
#define H (1080/S)     /* grid height in cells */

/* RGB color for each of the four cell states */
static const long colors[] = {0x111111, 0xffffff, 0xff00ff, 0x0000ff};

/* Transition table indexed by (alive_neighbors << 2 | current_state). */
#define STATES "02300230123001300130013002300230"

int main(void)
{
#ifdef _WIN32
    /* Set stdout to binary mode. */
    int _setmode(int, int);
    _setmode(1, 0x8000);
#endif
    static char grid[2][H][W];

    /* Seed the initial generation from a simple multiply-add PRNG
     * keyed on the wall clock; each cell takes the top bit. */
    unsigned long long rng = time(0);
    for (int y = 0; y < H; y++) {
        for (int x = 0; x < W; x++) {
            rng = rng*0x243f6a8885a308d3 + 1;
            grid[0][y][x] = (rng >> 63) & 1;
        }
    }

    /* Double-buffered generations; `page` flips each frame, forever. */
    for (int page = 0; ; page = !page) {
        #pragma omp parallel for
        for (int y = 0; y < H; y++) {
            for (int x = 0; x < W; x++) {
                int cur = grid[page][y][x];
                /* Count toroidal Moore neighbors that are in state 1. */
                int alive = 0;
                for (int dy = -1; dy <= 1; dy++) {
                    for (int dx = -1; dx <= 1; dx++) {
                        if (dy || dx)
                            alive += grid[page][(y+H+dy)%H][(x+W+dx)%W] == 1;
                    }
                }
                grid[!page][y][x] = STATES[(alive<<2 | cur)&0x1f] - '0';
            }
        }

        /* Upscale the new generation into a raw RGB frame. */
        static unsigned char ppm[H*S][W*S][3];
        for (int y = 0; y < H*S; y++) {
            for (int x = 0; x < W*S; x++) {
                long c = colors[(int)grid[!page][y/S][x/S]];
                ppm[y][x][0] = c >> 16;
                ppm[y][x][1] = c >> 8;
                ppm[y][x][2] = c >> 0;
            }
        }

        /* One binary PPM (P6) image per frame on stdout. */
        printf("P6\n%d %d\n255\n", W*S, H*S);
        if (!fwrite(ppm, sizeof(ppm), 1, stdout))
            return 1;
    }
}
convolution_sgemm_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack8to1_fp16sa_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const __fp16* bias = _bias; // permute Mat tmp; if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 16u, 8, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + size % 4, 16u, 8, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 8, opt.workspace_allocator); { int nn_size = size / 8; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; __fp16* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, 
v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += size * 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { // transpose 8x4 asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); img0 += size * 8; } } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i * 8; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += size * 8; } } } } int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; __fp16* outptr0 = top_blob.channel(p); __fp16* outptr1 = top_blob.channel(p + 1); __fp16* outptr2 = top_blob.channel(p + 
2); __fp16* outptr3 = top_blob.channel(p + 3); __fp16* outptr4 = top_blob.channel(p + 4); __fp16* outptr5 = top_blob.channel(p + 5); __fp16* outptr6 = top_blob.channel(p + 6); __fp16* outptr7 = top_blob.channel(p + 7); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? bias + p : zeros; float16x8_t _bias0 = vld1q_f16(biasptr); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "dup v24.8h, %22.h[0] \n" "dup v25.8h, %22.h[1] \n" "dup v26.8h, %22.h[2] \n" "dup v27.8h, %22.h[3] \n" "dup v28.8h, %22.h[4] \n" "dup v29.8h, %22.h[5] \n" "dup v30.8h, %22.h[6] \n" "dup v31.8h, %22.h[7] \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.8h, v16.8h, v0.h[0] \n" "fmla v25.8h, v16.8h, v0.h[1] \n" "fmla v26.8h, v16.8h, v0.h[2] \n" "fmla v27.8h, v16.8h, v0.h[3] \n" "fmla v28.8h, v16.8h, v0.h[4] \n" "fmla v29.8h, v16.8h, v0.h[5] \n" "fmla v30.8h, v16.8h, v0.h[6] \n" "fmla v31.8h, v16.8h, v0.h[7] \n" "fmla v24.8h, v17.8h, v1.h[0] \n" "fmla v25.8h, v17.8h, v1.h[1] \n" "fmla v26.8h, v17.8h, v1.h[2] \n" "fmla v27.8h, v17.8h, v1.h[3] \n" "fmla v28.8h, v17.8h, v1.h[4] \n" "fmla v29.8h, v17.8h, v1.h[5] \n" "fmla v30.8h, v17.8h, v1.h[6] \n" "fmla v31.8h, v17.8h, v1.h[7] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%9], #64 \n" "fmla v24.8h, v18.8h, v2.h[0] \n" "fmla v25.8h, v18.8h, v2.h[1] \n" "fmla v26.8h, v18.8h, v2.h[2] \n" "fmla v27.8h, v18.8h, v2.h[3] \n" "fmla v28.8h, v18.8h, v2.h[4] \n" "fmla v29.8h, v18.8h, v2.h[5] \n" "fmla v30.8h, v18.8h, v2.h[6] \n" "fmla v31.8h, v18.8h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.8h, v19.8h, v3.h[0] \n" "fmla v25.8h, v19.8h, v3.h[1] \n" "fmla 
v26.8h, v19.8h, v3.h[2] \n" "fmla v27.8h, v19.8h, v3.h[3] \n" "fmla v28.8h, v19.8h, v3.h[4] \n" "fmla v29.8h, v19.8h, v3.h[5] \n" "fmla v30.8h, v19.8h, v3.h[6] \n" "fmla v31.8h, v19.8h, v3.h[7] \n" "fmla v24.8h, v20.8h, v4.h[0] \n" "fmla v25.8h, v20.8h, v4.h[1] \n" "fmla v26.8h, v20.8h, v4.h[2] \n" "fmla v27.8h, v20.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v4.h[5] \n" "fmla v30.8h, v20.8h, v4.h[6] \n" "fmla v31.8h, v20.8h, v4.h[7] \n" "fmla v24.8h, v21.8h, v5.h[0] \n" "fmla v25.8h, v21.8h, v5.h[1] \n" "fmla v26.8h, v21.8h, v5.h[2] \n" "fmla v27.8h, v21.8h, v5.h[3] \n" "fmla v28.8h, v21.8h, v5.h[4] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v5.h[6] \n" "fmla v31.8h, v21.8h, v5.h[7] \n" "fmla v24.8h, v22.8h, v6.h[0] \n" "fmla v25.8h, v22.8h, v6.h[1] \n" "fmla v26.8h, v22.8h, v6.h[2] \n" "fmla v27.8h, v22.8h, v6.h[3] \n" "fmla v28.8h, v22.8h, v6.h[4] \n" "fmla v29.8h, v22.8h, v6.h[5] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.8h, v23.8h, v7.h[0] \n" "fmla v25.8h, v23.8h, v7.h[1] \n" "fmla v26.8h, v23.8h, v7.h[2] \n" "fmla v27.8h, v23.8h, v7.h[3] \n" "fmla v28.8h, v23.8h, v7.h[4] \n" "fmla v29.8h, v23.8h, v7.h[5] \n" "fmla v30.8h, v23.8h, v7.h[6] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "bne 0b \n" "st1 {v24.8h}, [%1], #16 \n" "st1 {v25.8h}, [%2], #16 \n" "st1 {v26.8h}, [%3], #16 \n" "st1 {v27.8h}, [%4], #16 \n" "st1 {v28.8h}, [%5], #16 \n" "st1 {v29.8h}, [%6], #16 \n" "st1 {v30.8h}, [%7], #16 \n" "st1 {v31.8h}, [%8], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "w"(_bias0) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", 
"v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 8); int nn = inch * maxk; // inch always > 0 asm volatile( "dup v24.4h, %22.h[0] \n" "dup v25.4h, %22.h[1] \n" "dup v26.4h, %22.h[2] \n" "dup v27.4h, %22.h[3] \n" "dup v28.4h, %22.h[4] \n" "dup v29.4h, %22.h[5] \n" "dup v30.4h, %22.h[6] \n" "dup v31.4h, %22.h[7] \n" "0: \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%9], #32 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "fmla v24.4h, v16.4h, v0.h[0] \n" "fmla v25.4h, v16.4h, v0.h[1] \n" "fmla v26.4h, v16.4h, v0.h[2] \n" "fmla v27.4h, v16.4h, v0.h[3] \n" "fmla v28.4h, v16.4h, v0.h[4] \n" "fmla v29.4h, v16.4h, v0.h[5] \n" "fmla v30.4h, v16.4h, v0.h[6] \n" "fmla v31.4h, v16.4h, v0.h[7] \n" "fmla v24.4h, v17.4h, v1.h[0] \n" "fmla v25.4h, v17.4h, v1.h[1] \n" "fmla v26.4h, v17.4h, v1.h[2] \n" "fmla v27.4h, v17.4h, v1.h[3] \n" "fmla v28.4h, v17.4h, v1.h[4] \n" "fmla v29.4h, v17.4h, v1.h[5] \n" "fmla v30.4h, v17.4h, v1.h[6] \n" "fmla v31.4h, v17.4h, v1.h[7] \n" "prfm pldl1keep, [%9, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%9], #32 \n" "fmla v24.4h, v18.4h, v2.h[0] \n" "fmla v25.4h, v18.4h, v2.h[1] \n" "fmla v26.4h, v18.4h, v2.h[2] \n" "fmla v27.4h, v18.4h, v2.h[3] \n" "fmla v28.4h, v18.4h, v2.h[4] \n" "fmla v29.4h, v18.4h, v2.h[5] \n" "fmla v30.4h, v18.4h, v2.h[6] \n" "fmla v31.4h, v18.4h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "fmla v24.4h, v19.4h, v3.h[0] \n" "fmla v25.4h, v19.4h, v3.h[1] \n" "fmla v26.4h, v19.4h, v3.h[2] \n" "fmla v27.4h, v19.4h, v3.h[3] \n" "fmla v28.4h, v19.4h, v3.h[4] \n" "fmla v29.4h, v19.4h, v3.h[5] \n" "fmla v30.4h, v19.4h, v3.h[6] \n" "fmla v31.4h, v19.4h, v3.h[7] \n" "fmla v24.4h, v20.4h, v4.h[0] \n" "fmla v25.4h, 
v20.4h, v4.h[1] \n" "fmla v26.4h, v20.4h, v4.h[2] \n" "fmla v27.4h, v20.4h, v4.h[3] \n" "fmla v28.4h, v20.4h, v4.h[4] \n" "fmla v29.4h, v20.4h, v4.h[5] \n" "fmla v30.4h, v20.4h, v4.h[6] \n" "fmla v31.4h, v20.4h, v4.h[7] \n" "fmla v24.4h, v21.4h, v5.h[0] \n" "fmla v25.4h, v21.4h, v5.h[1] \n" "fmla v26.4h, v21.4h, v5.h[2] \n" "fmla v27.4h, v21.4h, v5.h[3] \n" "fmla v28.4h, v21.4h, v5.h[4] \n" "fmla v29.4h, v21.4h, v5.h[5] \n" "fmla v30.4h, v21.4h, v5.h[6] \n" "fmla v31.4h, v21.4h, v5.h[7] \n" "fmla v24.4h, v22.4h, v6.h[0] \n" "fmla v25.4h, v22.4h, v6.h[1] \n" "fmla v26.4h, v22.4h, v6.h[2] \n" "fmla v27.4h, v22.4h, v6.h[3] \n" "fmla v28.4h, v22.4h, v6.h[4] \n" "fmla v29.4h, v22.4h, v6.h[5] \n" "fmla v30.4h, v22.4h, v6.h[6] \n" "fmla v31.4h, v22.4h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v24.4h, v23.4h, v7.h[0] \n" "fmla v25.4h, v23.4h, v7.h[1] \n" "fmla v26.4h, v23.4h, v7.h[2] \n" "fmla v27.4h, v23.4h, v7.h[3] \n" "fmla v28.4h, v23.4h, v7.h[4] \n" "fmla v29.4h, v23.4h, v7.h[5] \n" "fmla v30.4h, v23.4h, v7.h[6] \n" "fmla v31.4h, v23.4h, v7.h[7] \n" "bne 0b \n" "st1 {v24.4h}, [%1], #8 \n" "st1 {v25.4h}, [%2], #8 \n" "st1 {v26.4h}, [%3], #8 \n" "st1 {v27.4h}, [%4], #8 \n" "st1 {v28.4h}, [%5], #8 \n" "st1 {v29.4h}, [%6], #8 \n" "st1 {v30.4h}, [%7], #8 \n" "st1 {v31.4h}, [%8], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "w"(_bias0) // %22 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 8); 
int nn = inch * maxk; // inch always > 0 asm volatile( "mov v30.16b, %22.16b \n" "0: \n" "prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8h}, [%9], #16 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%10], #64 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%10], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.h}[0], [%1], #2 \n" "st1 {v30.h}[1], [%2], #2 \n" "st1 {v30.h}[2], [%3], #2 \n" "st1 {v30.h}[3], [%4], #2 \n" "st1 {v30.h}[4], [%5], #2 \n" "st1 {v30.h}[5], [%6], #2 \n" "st1 {v30.h}[6], [%7], #2 \n" "st1 {v30.h}[7], [%8], #2 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(outptr2), // %3 "=r"(outptr3), // %4 "=r"(outptr4), // %5 "=r"(outptr5), // %6 "=r"(outptr6), // %7 "=r"(outptr7), // %8 "=r"(tmpptr), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(outptr2), "4"(outptr3), "5"(outptr4), "6"(outptr5), "7"(outptr6), "8"(outptr7), "9"(tmpptr), "10"(kptr), "w"(_bias0) // %22 : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } } remain_outch_start += nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 bias0 = bias ? 
bias[p] : 0.f; float16x8_t _bias0 = vdupq_n_f16(bias0); int i = 0; for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 8); const __fp16* kptr = kernel.channel(p / 8 + p % 8); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v30.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%2], #64 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.8h, v16.8h, v0.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%2], #64 \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.8h, v20.8h, v0.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "bne 0b \n" "st1 {v30.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) // %8 : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const __fp16* kptr = kernel.channel(p / 8 + p % 8); int nn = inch * maxk; // inch always > 0 asm volatile( "mov v30.16b, %8.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" "fmla v30.4h, v16.4h, v0.h[0] \n" "fmla v30.4h, v17.4h, v0.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n" "fmla v30.4h, v18.4h, v0.h[2] \n" "fmla v30.4h, v19.4h, v0.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v30.4h, v20.4h, v0.h[4] \n" "fmla v30.4h, v21.4h, v0.h[5] \n" "fmla v30.4h, v22.4h, v0.h[6] \n" "fmla v30.4h, v23.4h, v0.h[7] \n" "bne 0b \n" "st1 {v30.4h}, [%1], #8 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr), "w"(_bias0) 
// %8 : "cc", "memory", "v0", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v30"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const __fp16* kptr = kernel.channel(p / 8 + p % 8); int nn = inch * maxk; // inch always > 0 float16x8_t _sum0 = vdupq_n_f16((__fp16)0.f); for (int q = 0; q < nn; q++) { float16x8_t _r0 = vld1q_f16(tmpptr); float16x8_t _k0 = vld1q_f16(kptr); _sum0 = vfmaq_f16(_sum0, _r0, _k0); kptr += 8; tmpptr += 8; } __fp16 sum0 = bias0 + vaddvq_f32(vcvt_f32_f16(vadd_f16(vget_low_f16(_sum0), vget_high_f16(_sum0)))); outptr0[0] = sum0; outptr0++; } } } static void convolution_im2col_sgemm_transform_kernel_pack8to1_fp16sa_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 8b-8a-maxk-inch/8a-outch/8b Mat kernel = _kernel.reshape(maxk, inch, outch); kernel_tm.create(64 * maxk, inch / 8, outch / 8 + outch % 8, (size_t)2u); int q = 0; for (; q + 7 < outch; q += 8) { __fp16* g00 = kernel_tm.channel(q / 8); for (int p = 0; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 8; i++) { for (int j = 0; j < 8; j++) { const float* k00 = kernel.channel(q + j).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } for (; q < outch; q++) { __fp16* g00 = kernel_tm.channel(q / 8 + q % 8); for (int p = 0; p + 7 < inch; p += 8) { for (int k = 0; k < maxk; k++) { for (int i = 0; i < 8; i++) { const float* k00 = kernel.channel(q).row(p + i); g00[0] = (__fp16)k00[k]; g00++; } } } } } static void convolution_im2col_sgemm_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * 
kernel_h;

    // im2col: one row of bottom_im2col per kernel tap, pack8 elements
    Mat bottom_im2col(size, maxk, inch, 16u, 8, opt.workspace_allocator);
    {
        // elements (x8 lanes) to skip from the end of one output row to the
        // start of the next; assumes the channel's rows are contiguous
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            __fp16* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // start of this tap's samples, offset by dilation
                    const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // copy 4 output columns (4 x 8 fp16) per iteration
                        for (; j + 3 < outw; j += 4)
                        {
                            float16x8_t _val0 = vld1q_f16(sptr);
                            float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
                            float16x8_t _val2 = vld1q_f16(sptr + stride_w * 16);
                            float16x8_t _val3 = vld1q_f16(sptr + stride_w * 24);
                            vst1q_f16(ptr, _val0);
                            vst1q_f16(ptr + 8, _val1);
                            vst1q_f16(ptr + 16, _val2);
                            vst1q_f16(ptr + 24, _val3);

                            sptr += stride_w * 32;
                            ptr += 32;
                        }
                        for (; j + 1 < outw; j += 2)
                        {
                            float16x8_t _val0 = vld1q_f16(sptr);
                            float16x8_t _val1 = vld1q_f16(sptr + stride_w * 8);
                            vst1q_f16(ptr, _val0);
                            vst1q_f16(ptr + 8, _val1);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        for (; j < outw; j++)
                        {
                            float16x8_t _val = vld1q_f16(sptr);
                            vst1q_f16(ptr, _val);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
csr.c
/*!
 * \file
 *
 * \brief Various routines for dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
 */

#include "gklib/GKlib.h"

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocates memory for a CSR matrix and initializes it.
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat;

  mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}

/*************************************************************************/
/*! Initializes the matrix: all fields zeroed/NULLed, with nrows/ncols
    set to -1 as an "unknown dimensions" sentinel.
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}

/*************************************************************************/
/*! Frees all the memory allocated for matrix and the matrix itself;
    *mat is set to NULL.  A NULL *mat is a no-op.
    \param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat == NULL)
    return;
  gk_csr_FreeContents(*mat);
  gk_free((void **)mat, LTERM);
}

/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  /* gk_free takes a LTERM-terminated list of field addresses; each is
     freed and reset to NULL */
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}

/*************************************************************************/
/*! Returns a copy of a matrix.
    Only the row/col ptr/ind/val/ids/norms arrays are duplicated;
    NOTE(review): rsums/csums, rsizes/csizes, rvols/cvols and rwgts/cwgts
    are NOT copied -- the copy's fields stay NULL.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows  = mat->nrows;
  nmat->ncols  = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"))
;
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval =
gk_fcopy(mat->colptr[mat->ncols], mat->colval, gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval")); return nmat; } /*************************************************************************/ /*! Returns a submatrix containint a set of consecutive rows. \param mat is the original matrix. \param rstart is the starting row. \param nrows is the number of rows from rstart to extract. \returns the row structure of the newly created submatrix. */ /**************************************************************************/ gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows) { ssize_t i; gk_csr_t *nmat; if (rstart+nrows > mat->nrows) return NULL; nmat = gk_csr_Create(); nmat->nrows = nrows; nmat->ncols = mat->ncols; /* copy the row structure */ if (mat->rowptr) nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart, gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr")); for (i=nrows; i>=0; i--) nmat->rowptr[i] -= nmat->rowptr[0]; ASSERT(nmat->rowptr[0] == 0); if (mat->rowids) nmat->rowids = gk_icopy(nrows, mat->rowids+rstart, gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids")); if (mat->rnorms) nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart, gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms")); if (mat->rsums) nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart, gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums")); ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]); if (mat->rowind) nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart], mat->rowind+mat->rowptr[rstart], gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart], "gk_csr_ExtractSubmatrix: rowind")); if (mat->rowval) nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart], mat->rowval+mat->rowptr[rstart], gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart], "gk_csr_ExtractSubmatrix: rowval")); return nmat; } /*************************************************************************/ /*! 
Returns a submatrix containing a certain set of rows. \param mat is the original matrix. \param nrows is the number of rows to extract. \param rind is the set of row numbers to extract. \returns the row structure of the newly created submatrix. */ /**************************************************************************/ gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind) { ssize_t i, ii, j, nnz; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = nrows; nmat->ncols = mat->ncols; for (nnz=0, i=0; i<nrows; i++) nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]]; nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr"); nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind"); nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval"); nmat->rowptr[0] = 0; for (nnz=0, j=0, ii=0; ii<nrows; ii++) { i = rind[ii]; gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz); gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz); nnz += mat->rowptr[i+1]-mat->rowptr[i]; nmat->rowptr[++j] = nnz; } ASSERT(j == nmat->nrows); return nmat; } /*************************************************************************/ /*! Returns a submatrix corresponding to a specified partitioning of rows. \param mat is the original matrix. \param part is the partitioning vector of the rows. \param pid is the partition ID that will be extracted. \returns the row structure of the newly created submatrix. 
*/ /**************************************************************************/ gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid) { ssize_t i, j, nnz; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = 0; nmat->ncols = mat->ncols; for (nnz=0, i=0; i<mat->nrows; i++) { if (part[i] == pid) { nmat->nrows++; nnz += mat->rowptr[i+1]-mat->rowptr[i]; } } nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr"); nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind"); nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval"); nmat->rowptr[0] = 0; for (nnz=0, j=0, i=0; i<mat->nrows; i++) { if (part[i] == pid) { gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz); gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz); nnz += mat->rowptr[i+1]-mat->rowptr[i]; nmat->rowptr[++j] = nnz; } } ASSERT(j == nmat->nrows); return nmat; } /*************************************************************************/ /*! Splits the matrix into multiple sub-matrices based on the provided color array. \param mat is the original matrix. \param color is an array of size equal to the number of non-zeros in the matrix (row-wise structure). The matrix is split into as many parts as the number of colors. For meaningfull results, the colors should be numbered consecutively starting from 0. \returns an array of matrices for each supplied color number. 
*/ /**************************************************************************/ gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color) { ssize_t i, j; int nrows, ncolors; ssize_t *rowptr; int *rowind; float *rowval; gk_csr_t **smats; nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; ncolors = gk_imax(rowptr[nrows], color)+1; smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats"); for (i=0; i<ncolors; i++) { smats[i] = gk_csr_Create(); smats[i]->nrows = mat->nrows; smats[i]->ncols = mat->ncols; smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr"); } for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) smats[color[j]]->rowptr[i]++; } for (i=0; i<ncolors; i++) MAKECSR(j, nrows, smats[i]->rowptr); for (i=0; i<ncolors; i++) { smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind"); smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval"); } for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j]; smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j]; smats[color[j]]->rowptr[i]++; } } for (i=0; i<ncolors; i++) SHIFTCSR(j, nrows, smats[i]->rowptr); return smats; } /**************************************************************************/ /*! Reads a CSR matrix from the supplied file and stores it the matrix's forward structure. \param filename is the file that stores the data. \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL specifying the type of the input format. The GK_CSR_FMT_CSR does not contain a header line, whereas the GK_CSR_FMT_BINROW is a binary format written by gk_csr_Write() using the same format specifier. \param readvals is either 1 or 0, indicating if the CSR file contains values or it does not. It only applies when GK_CSR_FMT_CSR is used. 
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decreamented during input so that they
           will start from 0. It only applies when GK_CSR_FMT_CSR is
           used.
    \returns the matrix that was read.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering)
{
  ssize_t i, k, l;
  size_t nfields, nrows, ncols, nnz, fmt, ncon;
  size_t lnlen;
  ssize_t *rowptr;
  int *rowind, ival;
  float *rowval=NULL, fval;
  int readsizes, readwgts;
  char *line=NULL, *head, *tail, fmtstr[256];
  FILE *fpin;
  gk_csr_t *mat=NULL;

  if (!gk_fexists(filename))
    gk_errexit(SIGERR, "File %s does not exist!\n", filename);

  /* --- binary row-based format: layout mirrors gk_csr_Write() exactly --- */
  if (format == GK_CSR_FMT_BINROW) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr");
    /* NOTE(review): sizeof(ssize_t) is platform-dependent, so these binary
       files are not portable across ABIs — matches the writer's layout */
    if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1)
      gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename);
    mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind");
    if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
      gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename);
    if (readvals == 1) {
      mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval");
      if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows])
        gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename);
    }
    gk_fclose(fpin);
    return mat;
  }

  /* --- binary column-based format: same layout, column arrays --- */
  if (format == GK_CSR_FMT_BINCOL) {
    mat = gk_csr_Create();

    fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin");
    if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename);
    if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1)
      gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename);
    mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr");
    if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1)
      gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename);
    mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind");
    if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
      gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename);
    if (readvals) {
      mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval");
      if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols])
        gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename);
    }
    gk_fclose(fpin);
    return mat;
  }

  /* --- text formats: parse the header (or stat the file) first --- */
  if (format == GK_CSR_FMT_CLUTO) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    /* skip '%' comment lines before the header */
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3)
      gk_errexit(SIGERR, "Header line must contain 3 integers.\n");

    readsizes = 0;
    readwgts  = 0;
    readvals  = 1;
    numbering = 1;
  }
  else if (format == GK_CSR_FMT_METIS) {
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
    do {
      if (gk_getline(&line, &lnlen, fpin) <= 0)
        gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename);
    } while (line[0] == '%');

    fmt = ncon = 0;
    nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon);
    if (nfields < 2)
      gk_errexit(SIGERR, "Header line must contain at least 2 integers (#vtxs and #edges).\n");

    ncols = nrows;
    nnz *= 2;  /* METIS counts each undirected edge once; CSR stores both directions */

    if (fmt > 111)
      gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt);

    /* fmt is a 3-digit flag string: [vertex sizes][vertex weights][edge weights] */
    sprintf(fmtstr, "%03zu", fmt%1000);
    readsizes = (fmtstr[0] == '1');
    readwgts  = (fmtstr[1] == '1');
    readvals  = (fmtstr[2] == '1');
    numbering = 1;
    ncon      = (ncon == 0 ? 1 : ncon);
  }
  else {
    /* headerless CSR: derive nrows/nnz by scanning the file */
    readsizes = 0;
    readwgts  = 0;

    gk_getfilestats(filename, &nrows, &nnz, NULL, NULL);

    if (readvals == 1 && nnz%2 == 1)
      gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals);
    if (readvals == 1)
      nnz = nnz/2;  /* index/value pairs */
    fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin");
  }

  mat = gk_csr_Create();

  mat->nrows = nrows;

  rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr");
  rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind");
  if (readvals != 2)
    rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval");

  if (readsizes)
    mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes");

  if (readwgts)
    mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts");

  /*----------------------------------------------------------------------
   * Read the sparse matrix file
   *---------------------------------------------------------------------*/
  /* numbering becomes the offset added to each parsed column index */
  numbering = (numbering ? -1 : 0);
  for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) {
    do {
      if (gk_getline(&line, &lnlen, fpin) == -1)
        gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i);
    } while (line[0] == '%');

    head = line;
    tail = NULL;

    /* Read vertex sizes */
    if (readsizes) {
#ifdef __MSC__
      mat->rsizes[i] = (float)strtod(head, &tail);
#else
      mat->rsizes[i] = strtof(head, &tail);
#endif
      if (tail == head)
        gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1);
      if (mat->rsizes[i] < 0)
        errexit("The size for vertex %zd must be >= 0\n", i+1);
      head = tail;
    }

    /* Read vertex weights */
    if (readwgts) {
      for (l=0; l<ncon; l++) {
#ifdef __MSC__
        mat->rwgts[i*ncon+l] = (float)strtod(head, &tail);
#else
        mat->rwgts[i*ncon+l] = strtof(head, &tail);
#endif
        if (tail == head)
          errexit("The line for vertex %zd does not have enough weights "
                  "for the %d constraints.\n", i+1, ncon);
        if (mat->rwgts[i*ncon+l] < 0)
          errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l);
        head = tail;
      }
    }

    /* Read the rest of the row: column indices (and optional values) */
    while (1) {
      ival = (int)strtol(head, &tail, 0);
      if (tail == head)
        break;
      head = tail;

      if ((rowind[k] = ival + numbering) < 0)
        gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i);

      /* track the largest column index seen to derive ncols */
      ncols = gk_max(rowind[k], ncols);

      if (readvals == 1) {
#ifdef __MSC__
        fval = (float)strtod(head, &tail);
#else
        fval = strtof(head, &tail);
#endif
        if (tail == head)
          gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k);
        head = tail;

        rowval[k] = fval;
      }
      k++;
    }
    rowptr[i+1] = k;
  }

  if (format == GK_CSR_FMT_METIS) {
    /* graphs are square by definition */
    ASSERT(ncols+1 == mat->nrows);
    mat->ncols = mat->nrows;
  }
  else {
    mat->ncols = ncols+1;
  }

  if (k != nnz)
    gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in "
                       "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k);

  gk_fclose(fpin);

  gk_free((void **)&line, LTERM);

  return mat;
}


/**************************************************************************/
/*!
    Writes the row-based structure of a matrix into a file.
    \param mat is the matrix to be written,
    \param filename is the name of the output file.
    \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR,
           GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL.
    \param writevals is either 1 or 0 indicating if the values will be
           written or not. This is only applicable when GK_CSR_FMT_CSR
           is used.
    \param numbering is either 1 or 0 indicating if the internal 0-based
           numbering will be shifted by one or not during output. This
           is only applicable when GK_CSR_FMT_CSR is used.
*/
/**************************************************************************/
void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering)
{
  ssize_t i, j;
  FILE *fpout;

  /* binary row-based dump; layout must stay in sync with gk_csr_Read() */
  if (format == GK_CSR_FMT_BINROW) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout);
    fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout);
    if (writevals)
      fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout);

    gk_fclose(fpout);
    return;
  }

  /* binary column-based dump (requires the column index to exist) */
  if (format == GK_CSR_FMT_BINCOL) {
    if (filename == NULL)
      gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n");
    fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout");

    fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout);
    fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout);
    fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout);
    fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout);
    if (writevals)
      fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout);

    gk_fclose(fpout);
    return;
  }

  /* text formats; NULL filename writes to stdout */
  if (filename)
    fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout");
  else
    fpout = stdout;

  if (format == GK_CSR_FMT_CLUTO) {
    /* CLUTO header: nrows ncols nnz; body is always 1-based with values */
    fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]);
    writevals = 1;
    numbering = 1;
  }

  for (i=0; i<mat->nrows; i++) {
    for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) {
      fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0));
      if (writevals)
        fprintf(fpout, " %f", mat->rowval[j]);
    }
    fprintf(fpout, "\n");
  }
  if (filename)
    gk_fclose(fpout);
}


/*************************************************************************/
/*! Prunes certain rows/columns of the matrix. The prunning takes place
    by analyzing the row structure of the matrix. The prunning takes place
    by removing rows/columns but it does not affect the numbering of the
    remaining rows/columns.

    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL)
           of the matrix will be prunned,
    \param minf is the minimum number of rows (columns) that a column (row) must
           be present in order to be kept,
    \param maxf is the maximum number of rows (columns) that a column (row) must
           be present at in order to be kept.
    \returns the prunned matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/ /**************************************************************************/ gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf) { ssize_t i, j, nnz; int nrows, ncols; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind, *collen; float *rowval, *nrowval; gk_csr_t *nmat; nmat = gk_csr_Create(); nrows = nmat->nrows = mat->nrows; ncols = nmat->ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval"); switch (what) { case GK_CSR_COL: collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen"); for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { ASSERT(rowind[j] < ncols); collen[rowind[j]]++; } } for (i=0; i<ncols; i++) collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0); nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (collen[rowind[j]]) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } gk_free((void **)&collen, LTERM); break; case GK_CSR_ROW: nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) { for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; } } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Eliminates certain entries from the rows/columns of the matrix. The filtering takes place by keeping only the highest weight entries whose sum accounts for a certain fraction of the overall weight of the row/column. 
    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param norm indicates the norm that will be used to aggregate the weights
           and possible values are 1 or 2,
    \param fraction is the fraction of the overall norm that will be retained
           by the kept entries.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction)
{
  ssize_t i, j, nnz;
  int nrows, ncols, ncand, maxlen=0;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval, rsum, tsum;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* output arrays sized for the worst case (nothing filtered) */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      /* nrowptr starts as a copy of rowptr and is used as per-row fill
         cursors while entries are scattered back row-wise */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++)
        maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        /* one candidate buffer per thread */
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++) {
          /* collect the column's entries and its total norm */
          for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
            cand[ncand].val = colind[j];
            cand[ncand].key = colval[j];
            tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* keep heaviest entries until the retained norm reaches
             fraction*tsum; scatter each kept entry into its row */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[nrowptr[cand[j].val]] = i;
            nrowval[nrowptr[cand[j].val]] = cand[j].key;
            nrowptr[cand[j].val]++;
          }
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      for (i=0; i<nrows; i++)
        maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]);

      #pragma omp parallel private(i, j, ncand, rsum, tsum, cand)
      {
        cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand");

        #pragma omp for schedule(static)
        for (i=0; i<nrows; i++) {
          for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
            cand[ncand].val = rowind[j];
            cand[ncand].key = rowval[j];
            tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]);
          }
          gk_fkvsortd(ncand, cand);

          /* write the kept entries into the row's original slot range */
          for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) {
            rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key);
            nrowind[rowptr[i]+j] = cand[j].val;
            nrowval[rowptr[i]+j] = cand[j].key;
          }
          nrowptr[i+1] = rowptr[i]+j;
        }

        gk_free((void **)&cand, LTERM);
      }

      /* compact nrowind/nrowval */
      nrowptr[0] = nnz = 0;
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i+1] = nnz;
      }
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix.
    The filtering takes place by keeping only the highest weight top-K
    entries along each row/column and those entries whose weight is greater
    than a specified value.
    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param topk is the number of the highest weight entries to keep.
    \param keepval is the weight of a term above which will be kept. This
           is used to select additional terms past the first topk.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval)
{
  ssize_t i, j, k, nnz;
  int nrows, ncols, ncand;
  ssize_t *rowptr, *colptr, *nrowptr;
  int *rowind, *colind, *nrowind;
  float *rowval, *colval, *nrowval;
  gk_csr_t *nmat;
  gk_fkv_t *cand;

  nmat = gk_csr_Create();

  nrows = nmat->nrows = mat->nrows;
  ncols = nmat->ncols = mat->ncols;

  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* worst-case sized output; compacted at the end of each case */
  nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr");
  nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind");
  nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval");

  switch (what) {
    case GK_CSR_COL:
      if (mat->colptr == NULL)
        gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");

      cand = gk_fkvmalloc(nrows, "gk_csr_LowFilter: cand");

      /* nrowptr doubles as per-row fill cursors during the scatter */
      gk_zcopy(nrows+1, rowptr, nrowptr);

      for (i=0; i<ncols; i++) {
        for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) {
          cand[ncand].val = colind[j];
          cand[ncand].key = colval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* always keep the top-k heaviest entries of the column... */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++) {
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
        /* ...plus any further entries whose weight is >= keepval */
        for (; j<ncand; j++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nrowptr[cand[j].val]] = i;
          nrowval[nrowptr[cand[j].val]] = cand[j].key;
          nrowptr[cand[j].val]++;
        }
      }

      /* compact the nrowind/nrowval */
      for (nnz=0, i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) {
          nrowind[nnz] = nrowind[j];
          nrowval[nnz] = nrowval[j];
        }
        nrowptr[i] = nnz;
      }
      SHIFTCSR(i, nrows, nrowptr);

      gk_free((void **)&cand, LTERM);
      break;

    case GK_CSR_ROW:
      if (mat->rowptr == NULL)
        gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");

      cand = gk_fkvmalloc(ncols, "gk_csr_LowFilter: cand");

      nrowptr[0] = 0;
      for (nnz=0, i=0; i<nrows; i++) {
        for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) {
          cand[ncand].val = rowind[j];
          cand[ncand].key = rowval[j];
        }
        gk_fkvsortd(ncand, cand);

        /* top-k first, then the >= keepval tail */
        k = gk_min(topk, ncand);
        for (j=0; j<k; j++, nnz++) {
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        for (; j<ncand; j++, nnz++) {
          if (cand[j].key < keepval)
            break;
          nrowind[nnz] = cand[j].val;
          nrowval[nnz] = cand[j].key;
        }
        nrowptr[i+1] = nnz;
      }
      gk_free((void **)&cand, LTERM);
      break;

    default:
      gk_csr_Free(&nmat);
      gk_errexit(SIGERR, "Unknown prunning type of %d\n", what);
      return NULL;
  }

  return nmat;
}


/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix.
    The filtering takes place by keeping only the terms whose contribution
    to the total length of the document is greater than a user-splied
    multiple over the average.

    This routine assumes that the vectors are normalized to be unit length.

    \param mat the matrix to be prunned,
    \param what indicates if the rows (GK_CSR_ROW) or the columns
           (GK_CSR_COL) of the matrix will be prunned,
    \param zscore is the multiplicative factor over the average contribution
           to the length of the document.
    \returns the filtered matrix consisting only of its row-based structure.
           The input matrix is not modified.
*/ /**************************************************************************/ gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore) { ssize_t i, j, nnz; int nrows; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind; float *rowval, *nrowval, avgwgt; gk_csr_t *nmat; nmat = gk_csr_Create(); nmat->nrows = mat->nrows; nmat->ncols = mat->ncols; nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval"); switch (what) { case GK_CSR_COL: gk_errexit(SIGERR, "This has not been implemented yet.\n"); break; case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n"); nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { avgwgt = zscore/(rowptr[i+1]-rowptr[i]); for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] > avgwgt) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Compacts the column-space of the matrix by removing empty columns. As a result of the compaction, the column numbers are renumbered. The compaction operation is done in place and only affects the row-based representation of the matrix. The new columns are ordered in decreasing frequency. \param mat the matrix whose empty columns will be removed. 
*/ /**************************************************************************/ void gk_csr_CompactColumns(gk_csr_t *mat) { ssize_t i; int nrows, ncols, nncols; ssize_t *rowptr; int *rowind, *colmap; gk_ikv_t *clens; nrows = mat->nrows; ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap"); clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens"); for (i=0; i<ncols; i++) { clens[i].key = 0; clens[i].val = i; } for (i=0; i<rowptr[nrows]; i++) clens[rowind[i]].key++; gk_ikvsortd(ncols, clens); for (nncols=0, i=0; i<ncols; i++) { if (clens[i].key > 0) colmap[clens[i].val] = nncols++; else break; } for (i=0; i<rowptr[nrows]; i++) rowind[i] = colmap[rowind[i]]; mat->ncols = nncols; gk_free((void **)&colmap, &clens, LTERM); } /*************************************************************************/ /*! Sorts the indices in increasing order \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of indices to sort. 
*/ /**************************************************************************/ void gk_csr_SortIndices(gk_csr_t *mat, int what) { int n, nn=0; ssize_t *ptr; int *ind; float *val; switch (what) { case GK_CSR_ROW: if (!mat->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n"); n = mat->nrows; ptr = mat->rowptr; ind = mat->rowind; val = mat->rowval; break; case GK_CSR_COL: if (!mat->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n"); n = mat->ncols; ptr = mat->colptr; ind = mat->colind; val = mat->colval; break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } #pragma omp parallel if (n > 100) { ssize_t i, j, k; gk_ikv_t *cand; float *tval; #pragma omp single for (i=0; i<n; i++) nn = gk_max(nn, ptr[i+1]-ptr[i]); cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand"); tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval"); #pragma omp for schedule(static) for (i=0; i<n; i++) { for (k=0, j=ptr[i]; j<ptr[i+1]; j++) { if (j > ptr[i] && ind[j] < ind[j-1]) k = 1; /* an inversion */ cand[j-ptr[i]].val = j-ptr[i]; cand[j-ptr[i]].key = ind[j]; tval[j-ptr[i]] = val[j]; } if (k) { gk_ikvsorti(ptr[i+1]-ptr[i], cand); for (j=ptr[i]; j<ptr[i+1]; j++) { ind[j] = cand[j-ptr[i]].key; val[j] = tval[cand[j-ptr[i]].val]; } } } gk_free((void **)&cand, &tval, LTERM); } } /*************************************************************************/ /*! Creates a row/column index from the column/row data. \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index will be created. 
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what)
{
  /* 'f' stands for forward, 'r' stands for reverse */
  ssize_t i, j, k, nf, nr;
  ssize_t *fptr, *rptr;
  int *find, *rind;
  float *fval, *rval;

  switch (what) {
    case GK_CSR_COL:
      /* build the column index from the row structure */
      nf   = mat->nrows;
      fptr = mat->rowptr;
      find = mat->rowind;
      fval = mat->rowval;

      /* drop any stale column index before rebuilding it */
      if (mat->colptr) gk_free((void **)&mat->colptr, LTERM);
      if (mat->colind) gk_free((void **)&mat->colind, LTERM);
      if (mat->colval) gk_free((void **)&mat->colval, LTERM);

      nr   = mat->ncols;
      rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    case GK_CSR_ROW:
      /* build the row index from the column structure */
      nf   = mat->ncols;
      fptr = mat->colptr;
      find = mat->colind;
      fval = mat->colval;

      if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM);
      if (mat->rowind) gk_free((void **)&mat->rowind, LTERM);
      if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);

      nr   = mat->nrows;
      rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr");
      rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind");
      rval = mat->rowval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL);
      break;
    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return;
  }

  /* histogram pass: count entries per reverse bucket, then MAKECSR turns
     the counts into offsets */
  for (i=0; i<nf; i++) {
    for (j=fptr[i]; j<fptr[i+1]; j++)
      rptr[find[j]]++;
  }
  MAKECSR(i, nr, rptr);

  /* two strategies to fill rind/rval; the heuristic presumably trades
     one extra pass for better locality on denser matrices */
  if (rptr[nr] > 6*nr) {
    /* two separate scatter passes (indices, then values), each followed
       by SHIFTCSR to restore the offsets the scatter advanced */
    for (i=0; i<nf; i++) {
      for (j=fptr[i]; j<fptr[i+1]; j++)
        rind[rptr[find[j]]++] = i;
    }
    SHIFTCSR(i, nr, rptr);

    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rval[rptr[find[j]]++] = fval[j];
      }
      SHIFTCSR(i, nr, rptr);
    }
  }
  else {
    /* single scatter pass filling indices and values together */
    if (fval) {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++) {
          k = find[j];
          rind[rptr[k]] = i;
          rval[rptr[k]++] = fval[j];
        }
      }
    }
    else {
      for (i=0; i<nf; i++) {
        for (j=fptr[i]; j<fptr[i+1]; j++)
          rind[rptr[find[j]]++] = i;
      }
    }
    SHIFTCSR(i, nr, rptr);
  }
}


/*************************************************************************/
/*! Normalizes the rows/columns of the matrix to be unit length.
    \param mat the matrix itself,
    \param what indicates what will be normalized and is obtained by
           specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL.
\param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void gk_csr_Normalize(gk_csr_t *mat, int what, int norm) { ssize_t i, j; int n; ssize_t *ptr; float *val, sum; if (what&GK_CSR_ROW && mat->rowval) { n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){ if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } if (what&GK_CSR_COL && mat->colval) { n = mat->ncols; ptr = mat->colptr; val = mat->colval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } } /*************************************************************************/ /*! Applies different row scaling methods. \param mat the matrix itself, \param type indicates the type of row scaling. Possible values are: GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2. 
*/
/**************************************************************************/
void gk_csr_Scale(gk_csr_t *mat, int type)
{
  ssize_t i, j;
  int nrows, ncols, nnzcols, bgfreq;
  ssize_t *rowptr;
  int *rowind, *collen;
  float *rowval, *cscale, maxtf;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  switch (type) {
    case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          /* maxtf seeded from the first entry; assumes the row is non-empty
             -- TODO confirm empty rows cannot reach here */
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .5 + .5*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j, maxtf) schedule(static)
        for (i=0; i<nrows; i++) {
          maxtf = fabs(rowval[rowptr[i]]);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf);
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] = .1 + .9*rowval[j]/maxtf;
        }
      }
      break;

    case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            /* sign() preserves the sign of the original value */
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j])));
          }
        }
      }
      break;

    case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            /* sqrt(sqrt(x)) == x^.25 */
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j]))));
          }
        }
      }
      break;

    case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65));
          }
        }
      }
      break;

    case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75));
          }
        }
      }
      break;

    case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85));
          }
        }
      }
      break;

    case GK_CSR_LOG: /* TF' = 1+log_2(TF) */
      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        double logscale = 1.0/log(2.0);
        /* Flat iteration over all nonzeros; row structure is irrelevant here. */
        #pragma omp for schedule(static,32)
        for (i=0; i<rowptr[nrows]; i++) {
          if (rowval[i] != 0.0)
            rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale;
        }
#ifdef XXX
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++) {
            if (rowval[j] != 0.0)
              rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale;
              //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale);
          }
        }
#endif
      }
      break;

    case GK_CSR_IDF: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      /* collen[c] = document frequency of column c (serial pass). */
      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*nrows/collen[i]) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    case GK_CSR_IDF2: /* TF' = TF*IDF */
      ncols  = mat->ncols;
      cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale");
      collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");

      for (i=0; i<nrows; i++) {
        for (j=rowptr[i]; j<rowptr[i+1]; j++)
          collen[rowind[j]]++;
      }

      nnzcols = 0;
      #pragma omp parallel if (ncols > OMPMINOPS)
      {
        #pragma omp for schedule(static) reduction(+:nnzcols)
        for (i=0; i<ncols; i++)
          nnzcols += (collen[i] > 0 ? 1 : 0);

        /* NOTE(review): bgfreq is shared and written (with the same value) by
           every thread after the reduction barrier, and the printf runs once
           per thread -- formally a data race; confirm whether this section was
           meant to run under "omp single". */
        bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols));
        printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);

        /* Smoothed IDF using bgfreq as a background frequency prior. */
        #pragma omp for schedule(static)
        for (i=0; i<ncols; i++)
          cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0);
      }

      #pragma omp parallel if (rowptr[nrows] > OMPMINOPS)
      {
        #pragma omp for private(j) schedule(static)
        for (i=0; i<nrows; i++) {
          for (j=rowptr[i]; j<rowptr[i+1]; j++)
            rowval[j] *= cscale[rowind[j]];
        }
      }

      gk_free((void **)&cscale, &collen, LTERM);
      break;

    default:
      gk_errexit(SIGERR, "Unknown scaling type of %d\n", type);
  }
}


/*************************************************************************/
/*! Computes the sums of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *sums;

  /* Select the view and (re)allocate the cached sums array on the matrix. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rsums)
        gk_free((void **)&mat->rsums, LTERM);

      sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->csums)
        gk_free((void **)&mat->csums, LTERM);

      sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums");
      break;
    default:
      gk_errexit(SIGERR, "Invalid sum type of %d.\n", what);
      return;
  }

  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the squared of the norms of the rows/columns
    \param mat the matrix itself,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating which
           squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what)
{
  ssize_t i;
  int n;
  ssize_t *ptr;
  float *val, *norms;

  /* Select the view and (re)allocate the cached squared-norms array. */
  switch (what) {
    case GK_CSR_ROW:
      n   = mat->nrows;
      ptr = mat->rowptr;
      val = mat->rowval;

      if (mat->rnorms)
        gk_free((void **)&mat->rnorms, LTERM);

      norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
      break;
    case GK_CSR_COL:
      n   = mat->ncols;
      ptr = mat->colptr;
      val = mat->colval;

      if (mat->cnorms)
        gk_free((void **)&mat->cnorms, LTERM);

      norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: norms");
      break;
    default:
      gk_errexit(SIGERR, "Invalid norm type of %d.\n", what);
      return;
  }

  /* norms[i] = dot(row_i, row_i), i.e., the squared 2-norm. */
  #pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static)
  for (i=0; i<n; i++)
    norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1);
}


/*************************************************************************/
/*! Computes the similarity between two rows/columns

    \param mat the matrix itself. The routine assumes that the indices
           are sorted in increasing order.
    \param i1 is the first row/column,
    \param i2 is the second row/column,
    \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of
           objects between the similarity will be computed,
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype)
{
  int nind1, nind2;
  int *ind1, *ind2;
  float *val1, *val2, stat1, stat2, sim;

  /* Bind (ind,val,len) triplets for both objects from the requested view. */
  switch (what) {
    case GK_CSR_ROW:
      if (!mat->rowptr)
        gk_errexit(SIGERR, "Row-based view of the matrix does not exists.\n");

      nind1 = mat->rowptr[i1+1]-mat->rowptr[i1];
      nind2 = mat->rowptr[i2+1]-mat->rowptr[i2];
      ind1  = mat->rowind + mat->rowptr[i1];
      ind2  = mat->rowind + mat->rowptr[i2];
      val1  = mat->rowval + mat->rowptr[i1];
      val2  = mat->rowval + mat->rowptr[i2];
      break;

    case GK_CSR_COL:
      if (!mat->colptr)
        gk_errexit(SIGERR, "Column-based view of the matrix does not exists.\n");

      nind1 = mat->colptr[i1+1]-mat->colptr[i1];
      nind2 = mat->colptr[i2+1]-mat->colptr[i2];
      ind1  = mat->colind + mat->colptr[i1];
      ind2  = mat->colind + mat->colptr[i2];
      val1  = mat->colval + mat->colptr[i1];
      val2  = mat->colval + mat->colptr[i2];
      break;

    default:
      gk_errexit(SIGERR, "Invalid index type of %d.\n", what);
      return 0.0;
  }

  switch (simtype) {
    case GK_CSR_COS:
    case GK_CSR_JAC:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* Merge the two sorted index lists.  The loop condition is '||' so that
         once one list is exhausted the tail of the other is still folded into
         its norm (the two leading guards handle that case); with the previous
         '&&' those guards were unreachable and the tails were silently
         dropped, under-counting stat1/stat2 for cosine and Jaccard. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1]*val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2]*val2[i2];
          i2++;
        }
        else {
          sim   += val1[i1]*val2[i2];
          stat1 += val1[i1]*val1[i1];
          stat2 += val2[i2]*val2[i2];
          i1++;
          i2++;
        }
      }
      if (simtype == GK_CSR_COS)
        sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0);
      else
        sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_MIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* Same merge with '||' so the tails contribute to stat1/stat2. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0);
      break;

    case GK_CSR_AMIN:
      sim = stat1 = stat2 = 0.0;
      i1 = i2 = 0;
      /* Asymmetric min: normalized only by stat1, so the tail of the first
         object matters; '||' ensures it is accumulated. */
      while (i1<nind1 || i2<nind2) {
        if (i1 == nind1) {
          stat2 += val2[i2];
          i2++;
        }
        else if (i2 == nind2) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] < ind2[i2]) {
          stat1 += val1[i1];
          i1++;
        }
        else if (ind1[i1] > ind2[i2]) {
          stat2 += val2[i2];
          i2++;
        }
        else {
          sim   += gk_min(val1[i1],val2[i2]);
          stat1 += val1[i1];
          stat2 += val2[i2];
          i1++;
          i2++;
        }
      }
      sim = (stat1 > 0.0 ? sim/stat1 : 0.0);
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  return sim;
}


/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using cosine
    similarity.

    \param mat the matrix itself
    \param nqterms is the number of columns in the query
    \param qind is the list of query columns
    \param qval is the list of correspodning query weights
    \param simtype is the type of similarity and is one of GK_CSR_COS,
           GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN
    \param nsim is the maximum number of requested most similar rows.
           If -1 is provided, then everything is returned unsorted.
    \param minsim is the minimum similarity of the requested most
           similar rows
    \param hits is the result set. This array should be at least
           of length nsim.
    \param i_marker is an array of size equal to the number of rows
           whose values are initialized to -1. If NULL is provided then
           this array is allocated and freed internally.
    \param i_cand is an array of size equal to the number of rows.
           If NULL is provided then this array is allocated and freed
           internally.

    \returns the number of identified most similar rows, which can be
             smaller than the requested number of nnbrs in those cases
             in which there are no sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind,
        float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits,
        int *i_marker, gk_fkv_t *i_cand)
{
  ssize_t i, ii, j, k;
  int nrows, ncols, ncand;
  ssize_t *colptr;
  int *colind, *marker;
  float *colval, *rnorms, mynorm, *rsums, mysum;
  gk_fkv_t *cand;

  if (nqterms == 0)
    return 0;

  nrows  = mat->nrows;
  ncols  = mat->ncols;
  colptr = mat->colptr;
  colind = mat->colind;
  colval = mat->colval;

  /* Use caller-supplied workspace when provided; otherwise allocate. */
  marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker"));
  cand   = (i_cand   ? i_cand   : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));

  switch (simtype) {
    case GK_CSR_COS:
      /* Accumulate dot products of the query against every row that shares a
         column with it; marker[] maps row-id -> slot in cand[].
         NOTE(review): the scores are left as raw dot products -- this is
         cosine only if the rows (and query) are already unit length; confirm
         the caller normalizes beforehand. */
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }
      break;

    case GK_CSR_JAC:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += colval[j]*qval[ii];
          }
        }
      }

      /* Extended-Jaccard: dot/(|r|^2+|q|^2-dot).
         NOTE(review): mat->rnorms is dereferenced unchecked -- the caller
         must have run gk_csr_ComputeSquaredNorms(mat, GK_CSR_ROW) first. */
      rnorms = mat->rnorms;
      mynorm = gk_fdot(nqterms, qval, 1, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
      break;

    case GK_CSR_MIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      /* NOTE(review): mat->rsums is dereferenced unchecked -- requires a
         prior gk_csr_ComputeSums(mat, GK_CSR_ROW). */
      rsums = mat->rsums;
      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
      break;

    /* Assymetric MIN similarity */
    case GK_CSR_AMIN:
      for (ncand=0, ii=0; ii<nqterms; ii++) {
        i = qind[ii];
        if (i < ncols) {
          for (j=colptr[i]; j<colptr[i+1]; j++) {
            k = colind[j];
            if (marker[k] == -1) {
              cand[ncand].val = k;
              cand[ncand].key = 0;
              marker[k]       = ncand++;
            }
            cand[marker[k]].key += gk_min(colval[j], qval[ii]);
          }
        }
      }

      mysum = gk_fsum(nqterms, qval, 1);

      for (i=0; i<ncand; i++)
        cand[i].key = cand[i].key/mysum;
      break;

    default:
      gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype);
      return -1;
  }

  /* go and prune the hits that are bellow minsim; marker[] is reset to -1
     along the way so a caller-supplied marker can be reused. */
  for (j=0, i=0; i<ncand; i++) {
    marker[cand[i].val] = -1;
    if (cand[i].key >= minsim)
      cand[j++] = cand[i];
  }
  ncand = j;

  /* nsim == -1 (or >= ncand): return everything, unsorted; otherwise select
     and sort the top-nsim candidates. */
  if (nsim == -1 || nsim >= ncand) {
    nsim = ncand;
  }
  else {
    nsim = gk_min(nsim, ncand);
    gk_dfkvkselect(ncand, nsim, cand);
    gk_fkvsortd(nsim, cand);
  }

  gk_fkvcopy(nsim, cand, hits);

  if (i_marker == NULL)
    gk_free((void **)&marker, LTERM);
  if (i_cand == NULL)
    gk_free((void **)&cand, LTERM);

  return nsim;
}
Forza.h
#ifndef Forza_h__ #define Forza_h__ struct PatternData { uint32_t Count; uint32_t Size; uint32_t Length[16]; uint32_t Skip[16]; __m128i Value[16]; }; void GeneratePattern(const char* Signature, const char* Mask, PatternData* Out) { auto l = strlen(Mask); Out->Count = 0; for (auto i = 0; i < l; i++) { if (Mask[i] == '?') continue; auto ml = 0, sl = 0; for (auto j = i; j < l; j++) { if (Mask[j] == '?' || sl >= 16) break; sl++; } for (auto j = i + sl; j < l; j++) { if (Mask[j] != '?') break; ml++; } auto c = Out->Count; Out->Length[c] = sl; Out->Skip[c] = sl + ml; Out->Value[c] = _mm_loadu_si128((const __m128i*)((uint8_t*)Signature + i)); Out->Count++; i += sl - 1; } Out->Size = l; } __forceinline bool Matches(const uint8_t* Data, PatternData* Patterns) { auto k = Data + Patterns->Skip[0]; for (auto i = 1; i < Patterns->Count; i++) { auto l = Patterns->Length[i]; if (_mm_cmpestri(Patterns->Value[i], l, _mm_loadu_si128((const __m128i*)k), l, _SIDD_CMP_EQUAL_EACH | _SIDD_MASKED_NEGATIVE_POLARITY) != l) break; if (i + 1 == Patterns->Count) return true; k += Patterns->Skip[i]; } return false; } uint8_t* FindEx(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask) { PatternData d; GeneratePattern(Signature, Mask, &d); auto out = static_cast<uint8_t*>(nullptr); auto end = Data + Length - d.Size; #pragma omp parallel for for (intptr_t i = Length - 32; i >= 0; i -= 32) { if (out != nullptr) break; auto p = Data + i; auto b = _mm256_loadu_si256((const __m256i*)p); if (_mm256_test_all_zeros(b, b) == 1) continue; auto f = _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 0), 16, _SIDD_CMP_EQUAL_ORDERED); if (f == 16) { f += _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 1), 16, _SIDD_CMP_EQUAL_ORDERED); if (f == 32) continue; } PossibleMatch: p += f; if (p + d.Size > end) { for (auto j = 0; j < d.Size & j + i + f < Length; j++) { if (Mask[j] == 'x' && (uint8_t)Signature[j] != p[j]) break; if (j + 1 == d.Size) out 
= (uint8_t*)p; } continue; } if (Matches(p, &d)) out = (uint8_t*)p; if (out != nullptr) break; p++; f = _mm_cmpestri(d.Value[0], d.Length[0], _mm_loadu_si128((const __m128i*)p), 16, _SIDD_CMP_EQUAL_ORDERED); if (f < 16) goto PossibleMatch; } return out; } void FindLargestArray(const char* Signature, const char* Mask, int Out[2]) { uint32_t t1 = 0; uint32_t t2 = strlen(Signature); uint32_t len = strlen(Mask); for (auto j = t2; j < len; j++) { if (Mask[j] != 'x') continue; auto count = strlen(&Signature[j]); if (count > t2) { t1 = j; t2 = count; } j += (count - 1); } Out[0] = t1; Out[1] = t2; } uint8_t* Find(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask) { int d[2] = { 0 }; FindLargestArray(Signature, Mask, d); const uint8_t len = static_cast<uint8_t>(strlen(Mask)); const uint8_t mbeg = static_cast<uint8_t>(d[0]); const uint8_t mlen = static_cast<uint8_t>(d[1]); uint8_t wildcard[UCHAR_MAX + 1] = { 0 }; for (auto i = mbeg; i < mbeg + mlen; i++) wildcard[(uint8_t)Signature[i]] = 1; uint8_t mfirst = (uint8_t)Signature[mbeg]; uint8_t first = (uint8_t)Signature[0]; uint8_t last = (uint8_t)Signature[len - 1]; for (int i = Length - len; i >= 0; i--) { uint8_t c = Data[i]; uint8_t w = wildcard[c]; auto k = 0; while (w == 0 && i > mlen) { i -= mlen; w = wildcard[Data[i]]; k = 1; } if (k == 1) { i++; continue; } if (c != mfirst) continue; if (i - mbeg < 0 || i - mbeg + len > Length) return nullptr; for (auto j = 0; j < len - 1; j++) { if (j == mbeg || Mask[j] != 'x') continue; if (Data[i - mbeg + j] != (uint8_t)Signature[j]) break; if (j + 1 == len - 1) return (uint8_t*)(Data + i - mbeg); } } return nullptr; } struct ForzaSIMD : public BenchBase { virtual void init(Tests test) override { switch (test) { case Tests::First: Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxx????xxx"; break; case Tests::Second: Pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21"; Mask = 
"xxxxxxxxxxx????xxx"; break; default: break; } CPUSupport = Supported(); } virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override { if (CPUSupport) return FindEx((const uint8_t*)baseAddress, size, Pattern, Mask); if (!Init) { std::cout << "Your CPU does not support SIMD instructions, replacing with Boyer-Moore variant." << std::endl; Init = true; } return Find((const uint8_t*)baseAddress, size, Pattern, Mask); } virtual const char* name() const override { return "Forza (SIMD With OpenMP)"; } virtual bool BackwardsSearch() const override { return true; } bool Supported() { int id[4] = { 0 }; __cpuid(id, 1); bool sse42 = (id[3] & 0x04000000) != 0; bool avx = (id[2] & 0x18000000) != 0; return (sse42 && avx); } bool Init = false; bool CPUSupport; char* Pattern; char* Mask; }; struct Forza : public BenchBase { virtual void init(Tests test) override { switch (test) { case Tests::First: Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxx????xxx"; break; case Tests::Second: Pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxxxxxx????xxx"; break; default: break; } } virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override { return Find((const uint8_t*)baseAddress, size, Pattern, Mask); } virtual const char* name() const override { return "Forza (Boyer-Moore Variant)"; } virtual bool BackwardsSearch() const override { return true; } char* Pattern; char* Mask; }; REGISTER(Forza); REGISTER(ForzaSIMD); #endif // Forza_h__
GB_unop__signum_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__signum_fp32_fp32)
// op(A') function:  GB (_unop_tran__signum_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = GB_signumf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_signumf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = GB_signumf (z) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIGNUM || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__signum_fp32_fp32)
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries to process
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_signumf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__signum_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the actual transpose kernel is shared via this template
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
6238.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp parallel for simd schedule(static, 14) num_threads(14) private(j) for (i = 1; i < _PB_NI - 1; ++i) { #pragma omp for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernal computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). 
*/ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
test.c
#include <omp.h>
#include <stdio.h>

#define N 1024
#define EXPLICIT_TARGET_TASK 0

#pragma omp requires unified_shared_memory

int A[N];
int B[N];
int C[N];

#if EXPLICIT_TARGET_TASK

#define LOMP_TASK_DEP_40 1
#define LOMP_TARGET_40 1
#define LOMP_PROC_BIND_40 1
#define LOMP_OS_LINUX 1
#define LOMP_CANCEL_40 1
#include "/gsa/yktgsa-h1/00/eichen/new-tlomp/lomp/include/omp_interface.h"

#define MAP_ITEM_SIZE (sizeof(int64_t))
#define MAP_SIZE (4*MAP_ITEM_SIZE)

/* Outlined target-task body: increments every element of A on the device. */
void _of1(lomp_Handle handle, char *fparg, char *sharg)
{
  // for now use a compiler generated target because I am not sure how to
  // generate target code, not sure its even possible directly from the
  // compiler
#if 0
  // get map pointers into first private
  int mapNum1 = 1;
  int64_t *args1= (int64_t *) fparg;
  int64_t *args_base1 = args1 + mapNum1;
  int64_t *arg_sizes1 = args_base1 + mapNum1;
  int64_t *arg_types1 = arg_sizes1 + mapNum1;
#endif
  #pragma omp target nowait map(A)
  {
    for(int i=0; i<N; i++) A[i]++;
  }
}
#endif

/* Runs two dependent target regions that each increment A[] once, then
   verifies A[i] == i+2.  Exit status is 0 on success, 1 on any mismatch. */
int main()
{
  int i, errors;

  for(i=0; i<N; i++) {
    A[i] = i;
  }

#if EXPLICIT_TARGET_TASK

  #pragma omp target data map(A)
  {
    lomp_Handle h = _lomp_GetHandle();

    // first task
    lomp_TaskDep_Public *taskDepArray1;
    int mapNum1 = 1;
    void *fpagr1 = _lomp_Task_AllocateFirstPrivate_WithDeps(h,
      mapNum1 * MAP_SIZE, 1, &taskDepArray1);
#if 0
    // get map pointers into first private
    int64_t *args1= (int64_t *) fparg1;
    int64_t *args_base1 = args1 + mapNum1;
    int64_t *arg_sizes1 = args_base1 + mapNum1;
    int64_t *arg_types1 = arg_sizes1 + mapNum1;
    // define maps
    args1[0] = args_base1[0] = &A[0];
    arg_sizes1[0] = N * sizeof(int);
    arg_types1[0] = lomp_tmap_tofrom | lomp_tmap_target_param;
#endif
    // set dependences
    taskDepArray1[0].addr = &A[0];
    taskDepArray1[0].status = LOMP_TASK_DEP_STATUS_OUT;
    // launch target task
    _lomp_TargetTask_Setup_WithDep(0, h, _of1, fpagr1, NULL, 0,
      1, taskDepArray1, 0);

    // second task
    lomp_TaskDep_Public *taskDepArray2;
    int mapNum2 = 1;
    void *fpagr2 = _lomp_Task_AllocateFirstPrivate_WithDeps(h,
      mapNum2 * MAP_SIZE, 1, &taskDepArray2);
#if 0
    // get map pointers into first private
    int64_t *arg2s= (int64_t *) fparg2;
    int64_t *args_base2 = args2 + mapNum2;
    int64_t *arg_sizes2 = args_base2 + mapNum2;
    int64_t *arg_types2 = arg_sizes2 + mapNum2;
    // define maps
    args2[0] = args_base2[0] = &A[0];
    arg_sizes2[0] = N * sizeof(int);
    arg_types2[0] = lomp_tmap_tofrom | lomp_tmap_target_param;
#endif
    // set dependences
    taskDepArray2[0].addr = &A[0];
    taskDepArray2[0].status = LOMP_TASK_DEP_STATUS_OUT;
    // launch target task
    _lomp_TargetTask_Setup_WithDep(0, h, _of1, fpagr2, NULL, 0,
      1, taskDepArray2, 0);

    #pragma omp taskwait
  }

#elif 1

  #pragma omp target data map(A)
  {
    /* Both regions depend(out: A[0]), so the second waits for the first. */
    #pragma omp target map(A) depend(out: A[0]) nowait
    {
      for(int i=0; i<N; i++) A[i]++;
    }

    #pragma omp target map(A) depend(out: A[0]) nowait
    {
      for(int i=0; i<N; i++) A[i]++;
    }
    #pragma omp taskwait
  }

#else

  #pragma omp target enter data map(to: A) depend(out: A[0]) nowait

  #pragma omp target map(A) depend(out: A[0]) nowait
  {
    for(int i=0; i<N; i++) A[i]++;
  }

  #pragma omp target map(A) depend(out: A[0]) nowait
  {
    for(int i=0; i<N; i++) A[i]++;
  }

  #pragma omp target exit data map(from: A) depend(out: A[0]) nowait

  #pragma omp taskwait

#endif

  errors = 0;
  for(i=0; i<N; i++) {
    if (A[i] != i+2)
      printf("%d: got %d, expected %d; error %d\n", i, A[i], i+2, ++errors);
    if (errors>25) break;
  }
  printf("completed with %d errors\n", errors);
  /* Previously this returned 1 unconditionally, so a passing run still looked
     like a failure to any test harness checking the exit status. */
  return errors ? 1 : 0;
}
csr_matop.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Matrix operation functions for hypre_CSRMatrix class.
 *
 *****************************************************************************/

#include "seq_mv.h"
#include "csr_matrix.h"

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAdd:
 * adds two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

hypre_CSRMatrix*
hypre_CSRMatrixAddHost ( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;

   HYPRE_Int         ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int         pos;
   /* marker[col] records, per row, whether a column was already counted
      (first pass: marker == row id) or where it was stored in C
      (second pass: marker == position in C_j/C_data). */
   HYPRE_Int         *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* First pass: count the union of sparsity patterns to size C. */
   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize(C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Second pass: copy A's entries, then merge B's (adding into existing
      positions when the column already appeared in this row). */
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         /* marker < C_i[ic] means the marker is stale (from a previous row) */
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);

   return C;
}

/* Dispatches the add to the host or device implementation based on where the
   operands live. Returns NULL if neither path applies. */
hypre_CSRMatrix*
hypre_CSRMatrixAdd( hypre_CSRMatrix *A,
                    hypre_CSRMatrix *B)
{
   HYPRE_Int exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                                          hypre_CSRMatrixMemoryLocation(B) );

   hypre_assert(exec != HYPRE_EXEC_UNSET);

   hypre_CSRMatrix *C = NULL;

   if (exec == HYPRE_EXEC_HOST)
   {
      C = hypre_CSRMatrixAddHost(A,B);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      C = hypre_CSRMatrixAddDevice(A,B);
   }
#endif

   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixBigAdd:
 * adds two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B.
 *       To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

/* Same two-pass add as hypre_CSRMatrixAddHost, but for matrices whose
 * column indices are stored in the BigJ (HYPRE_BigInt) array — used for
 * matrices with a global column space.  Row pointers remain HYPRE_Int. */
hypre_CSRMatrix *
hypre_CSRMatrixBigAdd( hypre_CSRMatrix *A,
                       hypre_CSRMatrix *B )
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_BigInt     *A_j      = hypre_CSRMatrixBigJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_BigInt     *B_j      = hypre_CSRMatrixBigJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_BigInt     *C_j;
   HYPRE_Int         ia, ib, ic, num_nonzeros;
   HYPRE_BigInt      jcol;
   HYPRE_Int         pos;
   /* marker[col]: last row (pass 1) / position in C (pass 2) where column
    * `col` was seen; -1 = unseen.  Indexed by a BigInt column value —
    * assumes local column indices fit in the ncols_A-sized array. */
   HYPRE_Int        *marker;

   if (nrows_A != nrows_B || ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 1 (symbolic): count nonzeros per row of C */
   num_nonzeros = 0;
   C_i[0] = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] != ic)
         {
            marker[jcol] = ic;
            num_nonzeros++;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   /* BigInitialize allocates the BigJ array instead of J */
   hypre_CSRMatrixBigInitialize(C);
   C_j = hypre_CSRMatrixBigJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 2 (numeric): copy A, merge B, summing duplicate columns */
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      for (ib = B_i[ic]; ib < B_i[ic+1]; ib++)
      {
         jcol = B_j[ib];
         if (marker[jcol] < C_i[ic])
         {
            C_j[pos] = jcol;
            C_data[pos] = B_data[ib];
            marker[jcol] = pos;
            pos++;
         }
         else
         {
            C_data[marker[jcol]] += B_data[ib];
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);

   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixMultiply
 * multiplies two CSR Matrices A and B and returns a CSR Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B. To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

/* Host SpGEMM C = A * B using a classical Gustavson-style row-merge,
 * parallelized with OpenMP.  Rows are statically partitioned across
 * threads; a symbolic pass counts per-thread nonzeros, a prefix sum over
 * jj_count turns the per-thread counts into global row offsets, and a
 * numeric pass fills C.  When C is square (allsquare), a diagonal entry
 * is forced into every row even if its value is zero. */
hypre_CSRMatrix*
hypre_CSRMatrixMultiplyHost( hypre_CSRMatrix *A,
                             hypre_CSRMatrix *B)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;
   HYPRE_Int         ia, ib, ic, ja, jb, num_nonzeros=0;
   HYPRE_Int         row_start, counter;
   HYPRE_Complex     a_entry, b_entry;
   HYPRE_Int         allsquare = 0;
   HYPRE_Int         max_num_threads;
   /* jj_count[t] = number of nonzeros produced by thread t (symbolic pass) */
   HYPRE_Int        *jj_count;

   if (ncols_A != nrows_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   if (nrows_A == ncols_B) allsquare = 1;

   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   max_num_threads = hypre_NumThreads();
   jj_count = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(ia, ib, ic, ja, jb, num_nonzeros, row_start, counter, a_entry, b_entry)
#endif
   {
      /* B_marker[col]: last row (symbolic) / position in C (numeric) at
       * which column `col` was touched by this thread; -1 = untouched */
      HYPRE_Int *B_marker = NULL;
      HYPRE_Int ns, ne, ii, jj;
      HYPRE_Int size, rest, num_threads;
      HYPRE_Int i1;

      ii = hypre_GetThreadNum();
      num_threads = hypre_NumActiveThreads();
      /* Static row partition [ns, ne); the first `rest` threads get one
       * extra row */
      size = nrows_A/num_threads;
      rest = nrows_A - size*num_threads;
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      B_marker = hypre_CTAlloc(HYPRE_Int, ncols_B, HYPRE_MEMORY_HOST);
      for (ib = 0; ib < ncols_B; ib++)
         B_marker[ib] = -1;

      /* Symbolic pass: C_i[ic] holds this thread's running nonzero count;
       * it is shifted to a global offset after the barrier below */
      num_nonzeros = 0;
      for (ic = ns; ic < ne; ic++)
      {
         C_i[ic] = num_nonzeros;
         if (allsquare)
         {
            /* reserve the diagonal slot unconditionally */
            B_marker[ic] = ic;
            num_nonzeros++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               if (B_marker[jb] != ic)
               {
                  B_marker[jb] = ic;
                  num_nonzeros++;
               }
            }
         }
      }
      jj_count[ii] = num_nonzeros;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         /* shift this thread's row offsets by the counts of all lower-
          * numbered threads */
         jj = jj_count[0];
         for (i1 = 1; i1 < ii; i1++)
            jj += jj_count[i1];
         for (i1 = ns; i1 < ne; i1++)
            C_i[i1] += jj;
      }
      else
      {
         /* thread 0: finalize total nnz and create/initialize C for all */
         C_i[nrows_A] = 0;
         for (i1 = 0; i1 < num_threads; i1++)
            C_i[nrows_A] += jj_count[i1];

         C = hypre_CSRMatrixCreate(nrows_A, ncols_B, C_i[nrows_A]);
         hypre_CSRMatrixI(C) = C_i;
         hypre_CSRMatrixInitialize(C);
         C_j = hypre_CSRMatrixJ(C);
         C_data = hypre_CSRMatrixData(C);
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Numeric pass: fill C_j/C_data; entries at or beyond row_start in
       * B_marker belong to the current row and are accumulated in place */
      for (ib = 0; ib < ncols_B; ib++)
         B_marker[ib] = -1;
      counter = C_i[ns];
      for (ic = ns; ic < ne; ic++)
      {
         row_start = C_i[ic];
         if (allsquare)
         {
            B_marker[ic] = counter;
            C_data[counter] = 0;
            C_j[counter] = ic;
            counter++;
         }
         for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
         {
            ja = A_j[ia];
            a_entry = A_data[ia];
            for (ib = B_i[ja]; ib < B_i[ja+1]; ib++)
            {
               jb = B_j[ib];
               b_entry = B_data[ib];
               if (B_marker[jb] < row_start)
               {
                  B_marker[jb] = counter;
                  C_j[B_marker[jb]] = jb;
                  C_data[B_marker[jb]] = a_entry*b_entry;
                  counter++;
               }
               else
                  C_data[B_marker[jb]] += a_entry*b_entry;
            }
         }
      }
      hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
   } /*end parallel region */

   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);

   return C;
}

/* Dispatcher: host or device SpGEMM depending on where A and B live.
 * NOTE(review): as in hypre_CSRMatrixAdd, a non-host policy without
 * HYPRE_USING_CUDA yields a NULL return. */
hypre_CSRMatrix*
hypre_CSRMatrixMultiply( hypre_CSRMatrix *A,
                         hypre_CSRMatrix *B)
{
   HYPRE_Int exec = hypre_GetExecPolicy2( hypre_CSRMatrixMemoryLocation(A),
                                          hypre_CSRMatrixMemoryLocation(B) );
   hypre_assert(exec != HYPRE_EXEC_UNSET);

   hypre_CSRMatrix *C = NULL;

   if (exec == HYPRE_EXEC_HOST)
   {
      C = hypre_CSRMatrixMultiplyHost(A,B);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      C = hypre_CSRMatrixMultiplyDevice(A,B);
   }
#endif

   return C;
}

/* Returns a new matrix B equal to A with all entries of magnitude <= tol
 * removed, or NULL if A has no such entries (caller keeps using A then).
 * A is not modified. */
hypre_CSRMatrix *
hypre_CSRMatrixDeleteZeros( hypre_CSRMatrix *A,
                            HYPRE_Real       tol)
{
   HYPRE_Complex    *A_data        = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i           = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j           = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A       = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A       = hypre_CSRMatrixNumCols(A);
   HYPRE_Int         num_nonzeros  = hypre_CSRMatrixNumNonzeros(A);
   hypre_CSRMatrix  *B;
   HYPRE_Complex    *B_data;
   HYPRE_Int        *B_i;
   HYPRE_Int        *B_j;
   HYPRE_Int         zeros;
   HYPRE_Int         i, j;
   HYPRE_Int         pos_A, pos_B;

   /* First count how many entries are to be dropped */
   zeros = 0;
   for (i=0; i < num_nonzeros; i++)
      if (hypre_cabs(A_data[i]) <= tol)
         zeros++;

   if (zeros)
   {
      B = hypre_CSRMatrixCreate(nrows_A,ncols_A,num_nonzeros-zeros);
      hypre_CSRMatrixInitialize(B);
      B_i = hypre_CSRMatrixI(B);
      B_j = hypre_CSRMatrixJ(B);
      B_data = hypre_CSRMatrixData(B);
      B_i[0] = 0;
      /* pos_A walks A's entries in order; pos_B is the compacted write
       * position in B */
      pos_A = 0;
      pos_B = 0;
      for (i=0; i < nrows_A; i++)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (hypre_cabs(A_data[j]) <= tol)
            {
               pos_A++;
            }
            else
            {
               B_data[pos_B] = A_data[pos_A];
               B_j[pos_B] = A_j[pos_A];
               pos_B++;
               pos_A++;
            }
         }
         B_i[i+1] = pos_B;
      }
      return B;
   }
   else
      return NULL;
}

/******************************************************************************
 *
 * Finds transpose of a hypre_CSRMatrix
 *
 *****************************************************************************/

/**
 * idx = idx2*dim1 + idx1
 * -> ret = idx1*dim2 + idx2
 *        = (idx%dim1)*dim2 + idx/dim1
 *
 * Maps a flat index of a dim2 x dim1 array to the flat index of the same
 * element in the dim1 x dim2 transposed layout.  Used below to access the
 * per-thread bucket array "as if transposed".
 */
static inline HYPRE_Int transpose_idx(HYPRE_Int idx, HYPRE_Int dim1, HYPRE_Int dim2)
{
   return idx%dim1*dim2 + idx/dim1;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixTranspose
 *--------------------------------------------------------------------------*/

/* Host transpose AT = A^T via an OpenMP-parallel counting sort on column
 * indices.  If `data` is zero only the pattern (I, J) of AT is built.
 * The bucket array doubles as AT's row pointer on return.  Returns 0. */
HYPRE_Int
hypre_CSRMatrixTransposeHost(hypre_CSRMatrix  *A,
                             hypre_CSRMatrix **AT,
                             HYPRE_Int         data)
{
   HYPRE_Complex      *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int          *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int          *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int           num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int           num_colsA = hypre_CSRMatrixNumCols(A);
   HYPRE_Int           num_nonzerosA = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Complex      *AT_data;
   /*HYPRE_Int          *AT_i;*/
   HYPRE_Int          *AT_j;
   HYPRE_Int           num_rowsAT;
   HYPRE_Int           num_colsAT;
   HYPRE_Int           num_nonzerosAT;
   HYPRE_Int           max_col;
   HYPRE_Int           i, j;

   /*--------------------------------------------------------------
    * First, ascertain that num_cols and num_nonzeros has been set.
    * If not, set them.
    *--------------------------------------------------------------*/

   if (! num_nonzerosA)
   {
      num_nonzerosA = A_i[num_rowsA];
   }

   if (num_rowsA && num_nonzerosA && ! num_colsA)
   {
      /* derive the column count from the largest column index present */
      max_col = -1;
      for (i = 0; i < num_rowsA; ++i)
      {
         for (j = A_i[i]; j < A_i[i+1]; j++)
         {
            if (A_j[j] > max_col)
               max_col = A_j[j];
         }
      }
      num_colsA = max_col+1;
   }

   num_rowsAT = num_colsA;
   num_colsAT = num_rowsA;
   num_nonzerosAT = num_nonzerosA;

   *AT = hypre_CSRMatrixCreate(num_rowsAT, num_colsAT, num_nonzerosAT);

   if (0 == num_colsA)
   {
      // JSP: parallel counting sorting breaks down
      // when A has no columns
      hypre_CSRMatrixInitialize(*AT);
      return 0;
   }

   AT_j = hypre_CTAlloc(HYPRE_Int, num_nonzerosAT, HYPRE_MEMORY_SHARED);
   hypre_CSRMatrixJ(*AT) = AT_j;
   if (data)
   {
      AT_data = hypre_CTAlloc(HYPRE_Complex, num_nonzerosAT, HYPRE_MEMORY_SHARED);
      hypre_CSRMatrixData(*AT) = AT_data;
   }

   /*-----------------------------------------------------------------
    * Parallel count sort
    *-----------------------------------------------------------------*/
   /* bucket is used as HYPRE_Int[num_threads][num_colsA]; one extra
    * num_colsA stride provides room for the final row pointer */
   HYPRE_Int *bucket = hypre_TAlloc(
      HYPRE_Int, (num_colsA + 1)*hypre_NumThreads(), HYPRE_MEMORY_SHARED);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int num_threads = hypre_NumActiveThreads();
      HYPRE_Int my_thread_num = hypre_GetThreadNum();

      /* load-balanced (by nnz) row range [iBegin, iEnd) for this thread */
      HYPRE_Int iBegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
      HYPRE_Int iEnd = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
      hypre_assert(iBegin <= iEnd);
      hypre_assert(iBegin >= 0 && iBegin <= num_rowsA);
      hypre_assert(iEnd >= 0 && iEnd <= num_rowsA);

      HYPRE_Int i, j;

      memset(bucket + my_thread_num*num_colsA, 0, sizeof(HYPRE_Int)*num_colsA);

      /*-----------------------------------------------------------------
       * Count the number of entries that will go into each bucket
       * bucket is used as HYPRE_Int[num_threads][num_colsA] 2D array
       *-----------------------------------------------------------------*/

      for (j = A_i[iBegin]; j < A_i[iEnd]; ++j)
      {
         HYPRE_Int idx = A_j[j];
         bucket[my_thread_num*num_colsA + idx]++;
      }

      /*-----------------------------------------------------------------
       * Parallel prefix sum of bucket with length num_colsA * num_threads
       * accessed as if it is transposed as HYPRE_Int[num_colsA][num_threads]
       *-----------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* local (per-thread stripe) prefix sums in the transposed view */
      for (i = my_thread_num*num_colsA + 1; i < (my_thread_num + 1)*num_colsA; ++i)
      {
         HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);
         HYPRE_Int transpose_i_minus_1 = transpose_idx(i - 1, num_threads, num_colsA);

         bucket[transpose_i] += bucket[transpose_i_minus_1];
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp master
#endif
      {
         /* serial pass: carry stripe totals across threads */
         for (i = 1; i < num_threads; ++i)
         {
            HYPRE_Int j0 = num_colsA*i - 1, j1 = num_colsA*(i + 1) - 1;
            HYPRE_Int transpose_j0 = transpose_idx(j0, num_threads, num_colsA);
            HYPRE_Int transpose_j1 = transpose_idx(j1, num_threads, num_colsA);

            bucket[transpose_j1] += bucket[transpose_j0];
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (my_thread_num > 0)
      {
         /* add the carry from the previous thread's stripe to every slot
          * of this stripe except the last (already carried above) */
         HYPRE_Int transpose_i0 = transpose_idx(num_colsA*my_thread_num - 1,
                                                num_threads, num_colsA);
         HYPRE_Int offset = bucket[transpose_i0];

         for (i = my_thread_num*num_colsA; i < (my_thread_num + 1)*num_colsA - 1; ++i)
         {
            HYPRE_Int transpose_i = transpose_idx(i, num_threads, num_colsA);

            bucket[transpose_i] += offset;
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /*----------------------------------------------------------------
       * Load the data and column numbers of AT
       * (iterating backwards keeps AT's rows sorted by column)
       *----------------------------------------------------------------*/

      if (data)
      {
         for (i = iEnd - 1; i >= iBegin; --i)
         {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j)
            {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_data[offset] = A_data[j];
               AT_j[offset] = i;
            }
         }
      }
      else
      {
         for (i = iEnd - 1; i >= iBegin; --i)
         {
            for (j = A_i[i + 1] - 1; j >= A_i[i]; --j)
            {
               HYPRE_Int idx = A_j[j];
               --bucket[my_thread_num*num_colsA + idx];

               HYPRE_Int offset = bucket[my_thread_num*num_colsA + idx];

               AT_j[offset] = i;
            }
         }
      }

   } /*end parallel region */

   hypre_CSRMatrixI(*AT) = bucket;
   // JSP: bucket is hypre_NumThreads() times longer than
   //      the size needed for AT_i, but this should be OK.
   //      If the memory size is a concern, we can allocate
   //      a new memory for AT_i and copy from bucket.
   hypre_CSRMatrixI(*AT)[num_colsA] = num_nonzerosA;

   return (0);
}

/* Dispatcher for the transpose: host vs. device by A's memory location */
HYPRE_Int
hypre_CSRMatrixTranspose(hypre_CSRMatrix  *A,
                         hypre_CSRMatrix **AT,
                         HYPRE_Int         data)
{
   HYPRE_Int exec = hypre_GetExecPolicy1( hypre_CSRMatrixMemoryLocation(A) );
   hypre_assert(exec != HYPRE_EXEC_UNSET);

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_CSRMatrixTransposeHost(A, AT, data);
   }
#if defined(HYPRE_USING_CUDA)
   else
   {
      ierr = hypre_CSRMatrixTransposeDevice(A, AT, data);
   }
#endif

   return ierr;
}

/* Splits the external rows Bs_ext (with global column indices in BigJ)
 * into a "diag" part, whose columns fall in the local range
 * [first_col_diag_B, last_col_diag_B] (shifted to local indices), and an
 * "offd" part.  Also builds the merged, sorted, deduplicated off-diagonal
 * column map col_map_offd_C (union of offd columns found in Bs_ext and the
 * existing col_map_offd_B), and remaps offd column indices into it by
 * binary search.  OpenMP-parallel with per-thread counting + prefix sums,
 * mirroring the structure of hypre_CSRMatrixMultiplyHost.  Outputs are
 * returned through the four pointer arguments; host memory throughout. */
HYPRE_Int hypre_CSRMatrixSplit(hypre_CSRMatrix  *Bs_ext,
                               HYPRE_BigInt      first_col_diag_B,
                               HYPRE_BigInt      last_col_diag_B,
                               HYPRE_Int         num_cols_offd_B,
                               HYPRE_BigInt     *col_map_offd_B,
                               HYPRE_Int        *num_cols_offd_C_ptr,
                               HYPRE_BigInt    **col_map_offd_C_ptr,
                               hypre_CSRMatrix **Bext_diag_ptr,
                               hypre_CSRMatrix **Bext_offd_ptr)
{
   HYPRE_Complex   *Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
   HYPRE_Int       *Bs_ext_i    = hypre_CSRMatrixI(Bs_ext);
   HYPRE_BigInt    *Bs_ext_j    = hypre_CSRMatrixBigJ(Bs_ext);
   HYPRE_Int        num_rows_Bext = hypre_CSRMatrixNumRows(Bs_ext);
   HYPRE_Int        B_ext_diag_size = 0;
   HYPRE_Int        B_ext_offd_size = 0;
   HYPRE_Int       *B_ext_diag_i = NULL;
   HYPRE_Int       *B_ext_diag_j = NULL;
   HYPRE_Complex   *B_ext_diag_data = NULL;
   HYPRE_Int       *B_ext_offd_i = NULL;
   HYPRE_Int       *B_ext_offd_j = NULL;
   HYPRE_Complex   *B_ext_offd_data = NULL;
   HYPRE_Int       *my_diag_array;
   HYPRE_Int       *my_offd_array;
   /* temp gathers all candidate offd global columns for sort/unique */
   HYPRE_BigInt    *temp;
   HYPRE_Int        max_num_threads;
   HYPRE_Int        cnt = 0;
   hypre_CSRMatrix *Bext_diag = NULL;
   hypre_CSRMatrix *Bext_offd = NULL;
   HYPRE_BigInt    *col_map_offd_C = NULL;
   HYPRE_Int        num_cols_offd_C = 0;

   B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_Bext+1, HYPRE_MEMORY_HOST);
   max_num_threads = hypre_NumThreads();
   my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
   my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int size, rest, ii;
      HYPRE_Int ns, ne;
      HYPRE_Int i1, i, j;
      HYPRE_Int my_offd_size, my_diag_size;
      HYPRE_Int cnt_offd, cnt_diag;
      HYPRE_Int num_threads = hypre_NumActiveThreads();

      /* static row partition [ns, ne) as in MultiplyHost */
      size = num_rows_Bext/num_threads;
      rest = num_rows_Bext - size*num_threads;
      ii = hypre_GetThreadNum();
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }

      /* Pass 1: count diag/offd entries per row (thread-local offsets) */
      my_diag_size = 0;
      my_offd_size = 0;
      for (i=ns; i < ne; i++)
      {
         B_ext_diag_i[i] = my_diag_size;
         B_ext_offd_i[i] = my_offd_size;
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               my_offd_size++;
            }
            else
            {
               my_diag_size++;
            }
         }
      }
      my_diag_array[ii] = my_diag_size;
      my_offd_array[ii] = my_offd_size;

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii)
      {
         /* shift this thread's row offsets to global positions */
         my_diag_size = my_diag_array[0];
         my_offd_size = my_offd_array[0];
         for (i1 = 1; i1 < ii; i1++)
         {
            my_diag_size += my_diag_array[i1];
            my_offd_size += my_offd_array[i1];
         }
         for (i1 = ns; i1 < ne; i1++)
         {
            B_ext_diag_i[i1] += my_diag_size;
            B_ext_offd_i[i1] += my_offd_size;
         }
      }
      else
      {
         /* thread 0: totals, closing row pointers, and shared allocations */
         B_ext_diag_size = 0;
         B_ext_offd_size = 0;
         for (i1 = 0; i1 < num_threads; i1++)
         {
            B_ext_diag_size += my_diag_array[i1];
            B_ext_offd_size += my_offd_array[i1];
         }
         B_ext_diag_i[num_rows_Bext] = B_ext_diag_size;
         B_ext_offd_i[num_rows_Bext] = B_ext_offd_size;

         if (B_ext_diag_size)
         {
            B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
            B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size)
         {
            B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
            B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
         }
         if (B_ext_offd_size || num_cols_offd_B)
         {
            temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Pass 2: scatter entries into diag (local index = global - first)
       * and offd (global index kept in both temp and B_ext_offd_j for now) */
      cnt_offd = B_ext_offd_i[ns];
      cnt_diag = B_ext_diag_i[ns];
      for (i = ns; i < ne; i++)
      {
         for (j = Bs_ext_i[i]; j < Bs_ext_i[i+1]; j++)
         {
            if (Bs_ext_j[j] < first_col_diag_B || Bs_ext_j[j] > last_col_diag_B)
            {
               temp[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_j[cnt_offd] = Bs_ext_j[j];
               B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
            }
            else
            {
               B_ext_diag_j[cnt_diag] = Bs_ext_j[j] - first_col_diag_B;
               B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
            }
         }
      }

      /* This computes the mappings */
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      if (ii == 0)
      {
         /* thread 0: merge offd columns with col_map_offd_B, sort, and
          * deduplicate in place to form col_map_offd_C */
         cnt = 0;
         if (B_ext_offd_size || num_cols_offd_B)
         {
            cnt = B_ext_offd_size;
            for (i=0; i < num_cols_offd_B; i++)
            {
               temp[cnt++] = col_map_offd_B[i];
            }
            if (cnt)
            {
               hypre_BigQsort0(temp, 0, cnt-1);
               num_cols_offd_C = 1;
               HYPRE_BigInt value = temp[0];
               for (i = 1; i < cnt; i++)
               {
                  if (temp[i] > value)
                  {
                     value = temp[i];
                     temp[num_cols_offd_C++] = value;
                  }
               }
            }

            if (num_cols_offd_C)
            {
               col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
            }

            for (i = 0; i < num_cols_offd_C; i++)
            {
               col_map_offd_C[i] = temp[i];
            }

            hypre_TFree(temp, HYPRE_MEMORY_HOST);
         }
      }

#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif

      /* Remap offd global column indices to positions in col_map_offd_C */
      for (i = ns; i < ne; i++)
      {
         for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i+1]; j++)
         {
            B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_ext_offd_j[j],
                                                    num_cols_offd_C);
         }
      }

   } /* end parallel region */

   hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
   hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);

   Bext_diag = hypre_CSRMatrixCreate(num_rows_Bext, last_col_diag_B-first_col_diag_B+1,
                                     B_ext_diag_size);
   hypre_CSRMatrixMemoryLocation(Bext_diag) = HYPRE_MEMORY_HOST;
   Bext_offd = hypre_CSRMatrixCreate(num_rows_Bext, num_cols_offd_C, B_ext_offd_size);
   hypre_CSRMatrixMemoryLocation(Bext_offd) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixI(Bext_diag)    = B_ext_diag_i;
   hypre_CSRMatrixJ(Bext_diag)    = B_ext_diag_j;
   hypre_CSRMatrixData(Bext_diag) = B_ext_diag_data;
   hypre_CSRMatrixI(Bext_offd)    = B_ext_offd_i;
   hypre_CSRMatrixJ(Bext_offd)    = B_ext_offd_j;
   hypre_CSRMatrixData(Bext_offd) = B_ext_offd_data;

   *col_map_offd_C_ptr = col_map_offd_C;
   *Bext_diag_ptr = Bext_diag;
   *Bext_offd_ptr = Bext_offd;
   *num_cols_offd_C_ptr = num_cols_offd_C;

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixReorder:
 * Reorders the column and data arrays of a square CSR matrix, such that the
 * first entry in each row is the diagonal one.
 *
 * Returns 0 on success, -1 if A is not square, -2 if some row has no
 * diagonal entry (in which case earlier rows have already been reordered).
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixReorder(hypre_CSRMatrix *A)
{
   HYPRE_Int      i, j, tempi, row_size;
   HYPRE_Complex  tempd;
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int     *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int      num_rowsA = hypre_CSRMatrixNumRows(A);
   HYPRE_Int      num_colsA = hypre_CSRMatrixNumCols(A);

   /* the matrix should be square */
   if (num_rowsA != num_colsA)
      return -1;

   /* A_j/A_data are advanced row by row, so indices below are row-local */
   for (i = 0; i < num_rowsA; i++)
   {
      row_size = A_i[i+1]-A_i[i];

      for (j = 0; j < row_size; j++)
      {
         if (A_j[j] == i)
         {
            if (j != 0)
            {
               /* swap the diagonal entry into slot 0 of the row */
               tempi = A_j[0];
               A_j[0] = A_j[j];
               A_j[j] = tempi;

               tempd = A_data[0];
               A_data[0] = A_data[j];
               A_data[j] = tempd;
            }
            break;
         }

         /* diagonal element is missing */
         if (j == row_size-1)
            return -2;
      }

      A_j += row_size;
      A_data += row_size;
   }

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixAddPartial:
 * adds matrix rows in the CSR matrix B to the CSR Matrix A, where row_nums[i]
 * defines to which row of A the i-th row of B is added, and returns a CSR
 * Matrix C;
 * Note: The routine does not check for 0-elements which might be generated
 *       through cancellation of elements in A and B or already contained
 *       in A and B.
 *       To remove those, use hypre_CSRMatrixDeleteZeros
 *--------------------------------------------------------------------------*/

/* C = A with selected rows of B added in: row i of B is merged into row
 * row_nums[i] of A.  The (row_nums, B-row) pairs are sorted by target row
 * first so both passes can walk A's rows in order; multiple B rows may map
 * to the same A row.  Same two-pass marker scheme as hypre_CSRMatrixAddHost.
 * Returns NULL (with hypre_error set) if the column counts differ. */
hypre_CSRMatrix *
hypre_CSRMatrixAddPartial( hypre_CSRMatrix *A,
                           hypre_CSRMatrix *B,
                           HYPRE_Int *row_nums)
{
   HYPRE_Complex    *A_data   = hypre_CSRMatrixData(A);
   HYPRE_Int        *A_i      = hypre_CSRMatrixI(A);
   HYPRE_Int        *A_j      = hypre_CSRMatrixJ(A);
   HYPRE_Int         nrows_A  = hypre_CSRMatrixNumRows(A);
   HYPRE_Int         ncols_A  = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex    *B_data   = hypre_CSRMatrixData(B);
   HYPRE_Int        *B_i      = hypre_CSRMatrixI(B);
   HYPRE_Int        *B_j      = hypre_CSRMatrixJ(B);
   HYPRE_Int         nrows_B  = hypre_CSRMatrixNumRows(B);
   HYPRE_Int         ncols_B  = hypre_CSRMatrixNumCols(B);
   hypre_CSRMatrix  *C;
   HYPRE_Complex    *C_data;
   HYPRE_Int        *C_i;
   HYPRE_Int        *C_j;
   HYPRE_Int         ia, ib, ic, jcol, num_nonzeros;
   HYPRE_Int         pos, i, i2, j, cnt;
   /* marker: per-column last-seen row / position in C, as in AddHost;
    * map/temp: B-row permutation sorted by target A row */
   HYPRE_Int        *marker;
   HYPRE_Int        *map;
   HYPRE_Int        *temp;

   if (ncols_A != ncols_B)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! incompatible matrix dimensions!\n");
      return NULL;
   }

   map = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   temp = hypre_CTAlloc(HYPRE_Int, nrows_B, HYPRE_MEMORY_HOST);
   for (i=0; i < nrows_B; i++)
   {
      map[i] = i;
      temp[i] = row_nums[i];
   }

   /* sort target rows (temp) and carry the original B row index in map */
   hypre_qsort2i(temp,map,0,nrows_B-1);

   marker = hypre_CTAlloc(HYPRE_Int, ncols_A, HYPRE_MEMORY_HOST);
   C_i = hypre_CTAlloc(HYPRE_Int, nrows_A+1, HYPRE_MEMORY_SHARED);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 1 (symbolic): count nonzeros per row of C; cnt advances through
    * the sorted (temp,map) list as target rows are consumed */
   num_nonzeros = 0;
   C_i[0] = 0;
   cnt = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         marker[jcol] = ic;
         num_nonzeros++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] != ic)
                  {
                     marker[jcol] = ic;
                     num_nonzeros++;
                  }
               }
            }
            else
               break;
         }
      }
      C_i[ic+1] = num_nonzeros;
   }

   C = hypre_CSRMatrixCreate(nrows_A, ncols_A, num_nonzeros);
   hypre_CSRMatrixI(C) = C_i;
   hypre_CSRMatrixInitialize(C);
   C_j = hypre_CSRMatrixJ(C);
   C_data = hypre_CSRMatrixData(C);

   for (ia = 0; ia < ncols_A; ia++)
      marker[ia] = -1;

   /* Pass 2 (numeric): copy A's rows, then merge the mapped B rows,
    * accumulating coefficients on duplicate columns */
   cnt = 0;
   pos = 0;
   for (ic = 0; ic < nrows_A; ic++)
   {
      for (ia = A_i[ic]; ia < A_i[ic+1]; ia++)
      {
         jcol = A_j[ia];
         C_j[pos] = jcol;
         C_data[pos] = A_data[ia];
         marker[jcol] = pos;
         pos++;
      }
      if (cnt < nrows_B && temp[cnt] == ic)
      {
         for (j = cnt; j < nrows_B; j++)
         {
            if (temp[j] == ic)
            {
               i2 = map[cnt++];
               for (ib = B_i[i2]; ib < B_i[i2+1]; ib++)
               {
                  jcol = B_j[ib];
                  if (marker[jcol] < C_i[ic])
                  {
                     C_j[pos] = jcol;
                     C_data[pos] = B_data[ib];
                     marker[jcol] = pos;
                     pos++;
                  }
                  else
                  {
                     C_data[marker[jcol]] += B_data[ib];
                  }
               }
            }
            else
               break;
         }
      }
   }

   hypre_TFree(marker, HYPRE_MEMORY_HOST);
   hypre_TFree(map, HYPRE_MEMORY_HOST);
   hypre_TFree(temp, HYPRE_MEMORY_HOST);

   return C;
}

/*--------------------------------------------------------------------------
 * hypre_CSRMatrixSumElts:
 * Returns the sum of all matrix elements.
 *--------------------------------------------------------------------------*/

HYPRE_Complex hypre_CSRMatrixSumElts( hypre_CSRMatrix *A )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_CSRMatrixData( A );
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      i;

   for ( i = 0; i < num_nonzeros; ++i )
   {
      sum += data[i];
   }

   return sum;
}

/* Frobenius norm of A: sqrt of the sum of squared entries.
 * NOTE(review): the accumulator is HYPRE_Complex and uses v*v rather than
 * |v|^2 — for complex builds this is sqrt(sum v_i^2), not the usual
 * conjugated Frobenius norm; confirm intended semantics for complex. */
HYPRE_Real hypre_CSRMatrixFnorm( hypre_CSRMatrix *A )
{
   HYPRE_Complex  sum = 0;
   HYPRE_Complex *data = hypre_CSRMatrixData( A );
   HYPRE_Int      num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
   HYPRE_Int      i, nrows, *A_i;

   nrows = hypre_CSRMatrixNumRows(A);
   A_i = hypre_CSRMatrixI(A);

   /* sanity check: stored nnz must agree with the row pointer */
   hypre_assert(num_nonzeros == A_i[nrows]);

   for ( i = 0; i < num_nonzeros; ++i )
   {
      HYPRE_Complex v = data[i];
      sum += v * v;
   }

   return sqrt(sum);
}
rose_jacobi_float_sve.c
#include "rex_kmp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* FIX: memcpy is used below; the include was missing
                       (implicit declaration is invalid in C99 and later) */
#include <time.h>
#include <sys/timeb.h>
#include <malloc.h>
#include <math.h>
#include <arm_sve.h>
#define REAL float

/* Wall-clock time in milliseconds (ftime granularity, ~1 ms). */
static double read_timer_ms()
{
    struct timeb tm;
    ftime(&tm);
    return ((double)tm.time) * 1000.0 + ((double)tm.millitm);
}

/************************************************************
 * program to solve a finite difference
 * discretization of Helmholtz equation :
 * (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using Jacobi iterative method.
 *
 * Modified: Sanjiv Shah,       Kuck and Associates, Inc. (KAI), 1998
 * Author:   Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 *
 * This c version program is translated by
 * Chunhua Liao, University of Houston, Jan, 2005
 *
 * Directives are used in this code to achieve parallelism.
 * All do loops are parallelized with default 'static' scheduling.
 *
 * Input :  n - grid dimension in x direction
 *          m - grid dimension in y direction
 *          alpha - Helmholtz constant (always greater than 0.0)
 *          tol   - error tolerance for iterative solver
 *          relax - Successice over relaxation parameter
 *          mits  - Maximum iterations for iterative solver
 *
 * On output
 *       : u(n,m) - Dependent variable (solutions)
 *       : f(n,m) - Right hand side function
 *************************************************************/
#define DEFAULT_DIMSIZE 256

/* Debug helper: dump an n-by-m row-major matrix A labelled by title/name. */
void print_array(char *title, char *name, float *A, int n, int m)
{
    printf("%s:\n", title);
    int i;
    int j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < m; j++) {
            printf("%s[%d][%d]:%f  ", name, i, j, A[i * m + j]);
        }
        printf("\n");
    }
    printf("\n");
}

/*      subroutine initialize (n,m,alpha,dx,dy,u,f)
 ******************************************************
 * Initializes data
 * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
 *
 ******************************************************/
void initialize(int n, int m, float alpha, float *dx, float *dy, float *u_p,
                float *f_p)
{
    int i;
    int j;
    int xx;
    int yy;
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    //double PI=3.1415926;
    *dx = (2.0 / (n - 1)); /* grid spacing over [-1,1] */
    *dy = (2.0 / (m - 1));
    /* Initialize initial condition and RHS */
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            /* NOTE: xx/yy are ints, so the coordinate is truncated toward
               zero; kept as-is to preserve the benchmark's reference
               results. */
            xx = ((int)(-1.0 + (*dx * (i - 1))));
            yy = ((int)(-1.0 + (*dy * (j - 1))));
            u[i][j] = 0.0;
            f[i][j] = (-1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) -
                       2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy)));
        }
}

/*  subroutine error_check (n,m,alpha,dx,dy,u,f)
 implicit none
 ************************************************************
 * Checks error between numerical and exact solution
 *
 ************************************************************/
void error_check(int n, int m, float alpha, float dx, float dy, float *u_p,
                 float *f_p)
{
    int i;
    int j;
    float xx;
    float yy;
    float temp;
    float error;
    error = 0.0;
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++) {
            xx = (-1.0 + (dx * (i - 1)));
            yy = (-1.0 + (dy * (j - 1)));
            /* difference against the exact solution (1-x^2)(1-y^2) */
            temp = (u[i][j] - (1.0 - (xx * xx)) * (1.0 - (yy * yy)));
            error = error + temp * temp;
        }
    error = (sqrt(error) / (n * m));
    printf("Solution Error: %2.6g\n", error);
}

void jacobi_seq(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);
void jacobi_omp(int n, int m, float dx, float dy, float alpha, float relax,
                float *u_p, float *f_p, float tol, int mits);

int main(int argc, char *argv[])
{
    int n = 256;
    int m = 256;
    float alpha = 0.0543;
    float tol = 0.0000000001;
    float relax = 1.0;
    int mits = 5000;
    /*fprintf(stderr, "Usage: jacobi [<n> <m> <alpha> <tol> <relax> <mits>]\n");
    fprintf(stderr, "\tn - grid dimension in x direction, default: %d\n", n);
    fprintf(stderr, "\tm - grid dimension in y direction, default: n if provided or %d\n", m);
    fprintf(stderr, "\talpha - Helmholtz constant (always greater than 0.0), default: %g\n", alpha);
    fprintf(stderr, "\ttol - error tolerance for iterative solver, default: %g\n", tol);
    fprintf(stderr, "\trelax - Successice over relaxation parameter, default: %g\n", relax);
    fprintf(stderr, "\tmits - Maximum iterations for iterative solver, default: %d\n", mits);*/
    if (argc == 2) {
        sscanf(argv[1], "%d", &n);
        m = n;
    } else if (argc == 3) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
    } else if (argc == 4) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
    } else if (argc == 5) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
    } else if (argc == 6) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
    } else if (argc == 7) {
        sscanf(argv[1], "%d", &n);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%g", &alpha);
        sscanf(argv[4], "%g", &tol);
        sscanf(argv[5], "%g", &relax);
        sscanf(argv[6], "%d", &mits);
    } else {
        /* any other arg count: all defaults, the rest of arg ignored */
    }
    printf("jacobi %d %d %g %g %g %d\n", n, m, alpha, tol, relax, mits);
    printf("------------------------------------------------------------------------------------------------------\n");
    /** init the array */
    float *u = (float *)(malloc(sizeof(float) * n * m));
    float *uomp = (float *)(malloc(sizeof(float) * n * m));
    float *f = (float *)(malloc(sizeof(float) * n * m));
    float dx; /* grid spacing in x direction */
    float dy; /* grid spacing in y direction */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    /* warming up: one untimed run of each kernel */
    jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
    jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);

    /* reset inputs so both timed versions start from the same state */
    initialize(n, m, alpha, &dx, &dy, u, f);
    memcpy(uomp, u, sizeof(float) * n * m);

    int num_runs = 20;
    double elapsed = 0;
    /* FIX: loop bound now follows num_runs instead of a duplicated
       literal 20, so the average stays correct if num_runs changes */
    for (int i = 0; i < num_runs; i++) {
        double elapsed1 = read_timer_ms();
        jacobi_seq(n, m, dx, dy, alpha, relax, u, f, tol, mits);
        elapsed += read_timer_ms() - elapsed1;
    }
    printf("seq elasped time(ms): %4f\n", elapsed / num_runs);
    //double mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    //printf("MFLOPS: %12.6g\n", mflops);
    puts("================");

    double elapsed2 = 0;
    for (int i = 0; i < num_runs; i++) {
        double elapsed3 = read_timer_ms();
        jacobi_omp(n, m, dx, dy, alpha, relax, uomp, f, tol, mits);
        elapsed2 += read_timer_ms() - elapsed3;
    }
    printf("OpenMP elasped time(ms): %4f\n", elapsed2 / num_runs);
    //mflops = (0.001 * mits * (n - 2) * (m - 2) * 13) / elapsed;
    //printf("MFLOPS: %12.6g\n", mflops);

    //print_array("Sequential Run", "u",(REAL*)u, n, m);
    error_check(n, m, alpha, dx, dy, u, f);
    free(u);
    free(f);
    free(uomp);
    return 0;
}

/*      subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,mits)
 ******************************************************************
 * Subroutine HelmholtzJ
 * Solves poisson equation on rectangular grid assuming :
 * (1) Uniform discretization in each direction, and
 * (2) Dirichlect boundary conditions
 *
 * Jacobi method is used in this routine
 *
 * Input : n,m   Number of grid points in the X/Y directions
 *         dx,dy Grid spacing in the X/Y directions
 *         alpha Helmholtz eqn. coefficient
 *         omega Relaxation factor
 *         f(n,m) Right hand side function
 *         u(n,m) Dependent variable/Solution
 *         tol    Tolerance for iterative solver
 *         mits  Maximum number of iterations
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi_seq(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    int i;
    int j;
    int k;
    float error;
    float ax;
    float ay;
    float b;
    float resid;
    /* NOTE: VLA on the stack, ~n*m*4 bytes; large grids may overflow the
       stack -- kept for parity with the generated reference code */
    float uold[n][m];
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    /*
     * Initialize coefficients */
    /* X-direction coef */
    ax = (1.0 / (dx * dx));
    /* Y-direction coef */
    ay = (1.0 / (dy * dy));
    /* Central coeff */
    b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
    error = (10.0 * tol);
    k = 1;
    while (k <= mits && error > tol) {
        error = 0.0;
        /* Copy new solution into old */
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];
        /* 5-point stencil update on the interior; boundary rows/cols are
           fixed (Dirichlet) */
        for (i = 1; i < n - 1; i++)
            for (j = 1; j < m - 1; j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) +
                         ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                //printf("i: %d, j: %d, resid: %f\n", i, j, resid);
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        /* Error check */
        //if (k % 500 == 0)
        //    printf("Finished %d iteration with error: %g\n", k, error);
        error = (sqrt(error) / (n * m));
        k = k + 1;
        /*  End iteration loop */
    }
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
}

void jacobi_omp(int n, int m, float dx, float dy, float alpha, float omega,
                float *u_p, float *f_p, float tol, int mits)
{
    int i;
    int j;
    int k;
    float error;
    float ax;
    float ay;
    float b;
    float resid;
    float *tmp = (float *)(malloc(sizeof(float) * n * m));
    float (*uold)[m] = ((float (*)[m])tmp);
    float (*u)[m] = ((float (*)[m])u_p);
    float (*f)[m] = ((float (*)[m])f_p);
    /*
     * Initialize coefficients */
    /* X-direction coef */
    ax = (1.0 / (dx * dx));
    /* Y-direction coef */
    ay = (1.0 / (dy * dy));
    /* Central coeff */
    b = (-2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha);
    error = (10.0 * tol);
    k = 1;
    while (k <= mits && error > tol) {
        error = 0.0;
        /* Copy new solution into old, one SVE strip at a time.
         * FIX: the generated code computed the predicate with the
         * pre-increment j and an (m - 1) bound, which (a) never copied
         * column m-1 -- leaving uold[i][m-1] uninitialized and read by the
         * stencil below -- and (b) enabled stale lanes that could read and
         * write past the end of a row. Computing svwhilelt_b32(j, m) at
         * the top of each strip copies exactly the m elements the
         * sequential version copies. */
        for (i = 0; i < n; i++) {
            for (j = 0; j < m; j += svcntw()) {
                svbool_t pg = svwhilelt_b32(j, m);
                svfloat32_t vec = svld1(pg, &u[i][j]);
                svst1(pg, &uold[i][j], vec);
            }
        }
        for (i = 1; i < n - 1; i++) {
#pragma omp simd reduction(+ : error)
            for (j = 1; j < m - 1; j++) {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) +
                         ay * (uold[i][j - 1] + uold[i][j + 1]) +
                         b * uold[i][j] - f[i][j]) / b;
                //printf("i: %d, j: %d, resid: %f\n", i, j, resid);
                u[i][j] = uold[i][j] - omega * resid;
                error = error + resid * resid;
            }
        }
        /* Error check */
        //if (k % 500 == 0)
        //    printf("Finished %d iteration with error: %g\n", k, error);
        error = (sqrt(error) / (n * m));
        k = k + 1;
        /*  End iteration loop */
    }
    printf("Total Number of Iterations: %d\n", k);
    printf("Residual: %.15g\n", error);
    free(tmp);
}
gates.h
/* * This file is part of Quantum++. * * MIT License * * Copyright (c) 2013 - 2019 Vlad Gheorghiu (vgheorgh@gmail.com) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

/**
 * \file classes/gates.h
 * \brief Quantum gates
 */

#ifndef CLASSES_GATES_H_
#define CLASSES_GATES_H_

namespace qpp {
/**
 * \class qpp::Gates
 * \brief const Singleton class that implements most commonly used gates
 */
class Gates final : public internal::Singleton<const Gates> // const Singleton
{
    friend class internal::Singleton<const Gates>;

  public:
    // One qubit gates
    cmat Id2{cmat::Identity(2, 2)}; ///< Identity gate
    cmat H{cmat::Zero(2, 2)};       ///< Hadamard gate
    cmat X{cmat::Zero(2, 2)};       ///< Pauli Sigma-X gate
    cmat Y{cmat::Zero(2, 2)};       ///< Pauli Sigma-Y gate
    cmat Z{cmat::Zero(2, 2)};       ///< Pauli Sigma-Z gate
    cmat S{cmat::Zero(2, 2)};       ///< S gate
    cmat T{cmat::Zero(2, 2)};       ///< T gate

    // two qubit gates
    cmat CNOT{cmat::Identity(4, 4)}; ///< Controlled-NOT control target gate
    cmat CZ{cmat::Identity(4, 4)};   ///< Controlled-Phase gate
    cmat CNOTba{cmat::Zero(4, 4)};   ///< Controlled-NOT target->control gate
    cmat SWAP{cmat::Identity(4, 4)}; ///< SWAP gate

    // three qubit gates
    cmat TOF{cmat::Identity(8, 8)};  ///< Toffoli gate
    cmat FRED{cmat::Identity(8, 8)}; ///< Fredkin gate

  private:
    /**
     * \brief Initializes the gates
     *
     * Fills in the non-zero entries of the fixed gates declared above; the
     * identity-initialized gates (CNOT, CZ, SWAP, TOF, FRED) only have the
     * blocks that differ from the identity overwritten.
     */
    Gates() {
        H << 1 / std::sqrt(2.), 1 / std::sqrt(2.), 1 / std::sqrt(2.),
            -1 / std::sqrt(2.);
        X << 0, 1, 1, 0;
        Z << 1, 0, 0, -1;
        Y << 0, -1_i, 1_i, 0;
        S << 1, 0, 0, 1_i;
        T << 1, 0, 0, std::exp(1_i * pi / 4.0);
        CNOT.block(2, 2, 2, 2) = X;
        CNOTba(0, 0) = 1;
        CNOTba(1, 3) = 1;
        CNOTba(2, 2) = 1;
        CNOTba(3, 1) = 1;
        CZ(3, 3) = -1;

        SWAP.block(1, 1, 2, 2) = X;

        TOF.block(6, 6, 2, 2) = X;
        FRED.block(4, 4, 4, 4) = SWAP;
    }

    /**
     * \brief Default destructor
     */
    ~Gates() = default;

  public:
    // variable gates

    // one qubit gates

    /**
     * \brief Qubit rotation of \a theta about the 3-dimensional real (unit)
     * vector \a n
     *
     * \param theta Rotation angle
     * \param n 3-dimensional real (unit) vector
     * \return Rotation gate
     */
    cmat Rn(double theta, const std::vector<double>& n) const {
        // EXCEPTION CHECKS

        // check 3-dimensional vector
        if (n.size() != 3)
            throw exception::CustomException(
                "qpp::Gates::Rn()", "n is not a 3-dimensional vector!");
        // END EXCEPTION CHECKS

        cmat result(2, 2);
        // exp(-i theta/2 n.sigma) = cos(theta/2) I - i sin(theta/2) n.sigma
        result = std::cos(theta / 2) * Id2 -
                 1_i * std::sin(theta / 2) * (n[0] * X + n[1] * Y + n[2] * Z);

        return result;
    }

    /**
     * \brief Qubit rotation of \a theta about the X axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RX(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {1, 0, 0});
    }

    /**
     * \brief Qubit rotation of \a theta about the Y axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RY(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {0, 1, 0});
    }

    /**
     * \brief Qubit rotation of \a theta about the Z axis
     *
     * \param theta Rotation angle
     * \return Rotation gate
     */
    cmat RZ(double theta) const {
        // EXCEPTION CHECKS

        // END EXCEPTION CHECKS

        return Rn(theta, {0, 0, 1});
    }

    // one quDit gates

    /**
     * \brief Generalized Z gate for qudits
     *
     * \note Defined as \f$ Z = \sum_{j=0}^{D-1} \exp(2\pi \mathrm{i} j/D)
     * |j\rangle\langle j| \f$
     *
     * \param D Dimension of the Hilbert space
     * \return Generalized Z gate for qudits
     */
    cmat Zd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Zd()");
        // END EXCEPTION CHECKS

        cmat result = cmat::Zero(D, D);
        for (idx i = 0; i < D; ++i)
            result(i, i) = std::pow(omega(D), static_cast<double>(i));

        return result;
    }

    /**
     * \brief SWAP gate for qudits
     *
     * \param D Dimension of the Hilbert space
     * \return SWAP gate for qudits
     */
    cmat SWAPd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::SWAPd()");
        // END EXCEPTION CHECKS

        cmat result = cmat::Zero(D * D, D * D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < D; ++j)
            for (idx i = 0; i < D; ++i)
                result(D * i + j, i + D * j) = 1;

        return result;
    }

    /**
     * \brief Quantum Fourier transform gate for qudits
     *
     * \note Defined as
     * \f$ F = \sum_{j,k=0}^{D-1} \exp(2\pi \mathrm{i} jk/D) |j\rangle\langle
     * k| \f$
     *
     * \param D Dimension of the Hilbert space
     * \return Fourier transform gate for qudits
     */
    cmat Fd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Fd()");
        // END EXCEPTION CHECKS

        cmat result(D, D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < D; ++j)
            for (idx i = 0; i < D; ++i)
                result(i, j) = 1 / std::sqrt(D) *
                               std::pow(omega(D), static_cast<double>(i * j));

        return result;
    }

    /**
     * \brief Modular multiplication gate for qubits
     * Implements \f$ |x\rangle \longrightarrow |ax \mathrm{ mod } N\rangle \f$
     *
     * \note For the gate to be unitary, \a a and \a N should be co-prime. The
     * function does not check co-primality in release versions!
     *
     * \note The number of qubits required to implement the gate should satisfy
     * \f$ n \geq \lceil\log_2(N)\rceil \f$
     *
     * \param a Positive integer less than \a N
     * \param N Positive integer
     * \param n Number of qubits required for implementing the gate
     * \return Modular multiplication gate
     */
    cmat MODMUL(idx a, idx N, idx n) const {
        // check co-primality (unitarity) only in DEBUG version
#ifndef NDEBUG
        assert(gcd(a, N) == 1);
#endif
        // EXCEPTION CHECKS

        // check valid arguments
        if (N < 3 || a >= N) {
            throw exception::OutOfRange("qpp::Gates::MODMUL()");
        }

        // check enough qubits
        if (n < static_cast<idx>(std::ceil(std::log2(N)))) {
            throw exception::OutOfRange("qpp::Gates::MODMUL()");
        }
        // END EXCEPTION CHECKS

        // minimum number of qubits required to implement the gate
        idx D = static_cast<idx>(std::llround(std::pow(2, n)));

        cmat result = cmat::Zero(D, D);

#ifdef WITH_OPENMP_
#pragma omp parallel for collapse(2)
#endif // WITH_OPENMP_
        // column major order for speed
        for (idx j = 0; j < N; ++j)
            for (idx i = 0; i < N; ++i)
                if (static_cast<idx>(modmul(j, a, N)) == i)
                    result(i, j) = 1;

#ifdef WITH_OPENMP_
#pragma omp parallel for
#endif // WITH_OPENMP_
        // complete the matrix (identity on the basis states >= N)
        for (idx i = N; i < D; ++i)
            result(i, i) = 1;

        return result;
    }

    /**
     * \brief Generalized X gate for qudits
     *
     * \note Defined as \f$ X = \sum_{j=0}^{D-1} |j\oplus 1\rangle\langle j|
     * \f$, i.e. raising operator \f$ X|j\rangle = |j\oplus 1\rangle\f$
     *
     * \param D Dimension of the Hilbert space
     * \return Generalized X gate for qudits
     */
    cmat Xd(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Xd()");
        // END EXCEPTION CHECKS

        // X is the Fourier conjugate of Z: X = F^{-1} Z F
        return Fd(D).inverse() * Zd(D) * Fd(D);
    }

    /**
     * \brief Identity gate
     *
     * \note Can change the return type from complex matrix (default) by
     * explicitly specifying the template parameter
     *
     * \param D Dimension of the Hilbert space
     * \return Identity gate on a Hilbert space of dimension \a D
     */
    template <typename Derived = Eigen::MatrixXcd>
    Derived Id(idx D = 2) const {
        // EXCEPTION CHECKS

        // check valid dimension
        if (D == 0)
            throw exception::DimsInvalid("qpp::Gates::Id()");
        // END EXCEPTION CHECKS

        return Derived::Identity(D, D);
    }

    /**
     * \brief Generates the multi-partite multiple-controlled-\a A gate in
     * matrix form
     * \see qpp::applyCTRL()
     *
     * \note The dimension of the gate \a A must match
     * the dimension of \a target
     *
     * \param A Eigen expression
     * \param ctrl Control subsystem indexes
     * \param target Subsystem indexes where the gate \a A is applied
     * \param n Total number of subsystems
     * \param d Subsystem dimensions
     * \param shift Performs the control as if the \a ctrl qudit states were
     * \f$ X\f$-incremented component-wise by \a shift. If non-empty (default),
     * the size of \a shift must be the same as the size of \a ctrl.
     * \return CTRL-A gate, as a matrix over the same scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar> CTRL(const Eigen::MatrixBase<Derived>& A,
                                           const std::vector<idx>& ctrl,
                                           const std::vector<idx>& target,
                                           idx n, idx d = 2,
                                           std::vector<idx> shift = {}) const {
        const dyn_mat<typename Derived::Scalar>& rA = A.derived();

        // EXCEPTION CHECKS

        // check matrix zero-size
        if (!internal::check_nonzero_size(rA))
            throw exception::ZeroSize("qpp::Gates::CTRL()");

        // check square matrix
        if (!internal::check_square_mat(rA))
            throw exception::MatrixNotSquare("qpp::Gates::CTRL()");

        // check lists zero-size
        if (ctrl.empty())
            throw exception::ZeroSize("qpp::Gates::CTRL()");
        if (target.empty())
            throw exception::ZeroSize("qpp::Gates::CTRL()");

        // check out of range
        if (n == 0)
            throw exception::OutOfRange("qpp::Gates::CTRL()");

        // check valid local dimension
        if (d == 0)
            throw exception::DimsInvalid("qpp::Gates::CTRL()");

        // ctrl + gate subsystem vector
        std::vector<idx> ctrlgate = ctrl;
        ctrlgate.insert(std::end(ctrlgate), std::begin(target),
                        std::end(target));
        std::sort(std::begin(ctrlgate), std::end(ctrlgate));

        std::vector<idx> dims(n, d); // local dimensions vector

        // check that ctrl + gate subsystem is valid
        // with respect to local dimensions
        if (!internal::check_subsys_match_dims(ctrlgate, dims))
            throw exception::SubsysMismatchDims("qpp::Gates::CTRL()");

        // check that target list match the dimension of the matrix
        using Index = typename dyn_mat<typename Derived::Scalar>::Index;
        if (rA.rows() !=
            static_cast<Index>(std::llround(std::pow(d, target.size()))))
            throw exception::DimsMismatchMatrix("qpp::Gates::CTRL()");

        // check shift
        if (!shift.empty() && (shift.size() != ctrl.size()))
            throw exception::SizeMismatch("qpp::Gates::CTRL()");
        if (!shift.empty())
            for (auto&& elem : shift)
                if (elem >= d)
                    throw exception::OutOfRange("qpp::Gates::CTRL()");
        // END EXCEPTION CHECKS

        if (shift.empty())
            shift = std::vector<idx>(ctrl.size(), 0);

        // Use static allocation for speed!
        idx Cdims[maxn];
        idx midx_row[maxn];
        idx midx_col[maxn];

        idx CdimsA[maxn];
        idx midxA_row[maxn];
        idx midxA_col[maxn];

        idx Cdims_bar[maxn];
        idx Csubsys_bar[maxn];
        idx midx_bar[maxn];

        idx n_gate = target.size();
        idx n_ctrl = ctrl.size();
        idx n_subsys_bar = n - ctrlgate.size();
        idx D = static_cast<idx>(std::llround(std::pow(d, n)));
        idx DA = static_cast<idx>(rA.rows());
        idx Dsubsys_bar =
            static_cast<idx>(std::llround(std::pow(d, n_subsys_bar)));

        // compute the complementary subsystem of ctrlgate w.r.t. dims
        std::vector<idx> subsys_bar = complement(ctrlgate, n);
        std::copy(std::begin(subsys_bar), std::end(subsys_bar),
                  std::begin(Csubsys_bar));

        for (idx k = 0; k < n; ++k) {
            midx_row[k] = midx_col[k] = 0;
            Cdims[k] = d;
        }

        for (idx k = 0; k < n_subsys_bar; ++k) {
            Cdims_bar[k] = d;
            midx_bar[k] = 0;
        }

        for (idx k = 0; k < n_gate; ++k) {
            midxA_row[k] = midxA_col[k] = 0;
            CdimsA[k] = d;
        }

        dyn_mat<typename Derived::Scalar> result =
            dyn_mat<typename Derived::Scalar>::Identity(D, D);
        dyn_mat<typename Derived::Scalar> Ak;

        // run over the complement indexes
        for (idx i = 0; i < Dsubsys_bar; ++i) {
            // get the complement row multi-index
            internal::n2multiidx(i, n_subsys_bar, Cdims_bar, midx_bar);
            for (idx k = 0; k < d; ++k) {
                Ak = powm(rA, k); // compute rA^k
                // run over the target row multi-index
                for (idx a = 0; a < DA; ++a) {
                    // get the target row multi-index
                    internal::n2multiidx(a, n_gate, CdimsA, midxA_row);

                    // construct the result row multi-index

                    // first the ctrl part (equal for both row and column)
                    for (idx c = 0; c < n_ctrl; ++c)
                        midx_row[ctrl[c]] = midx_col[ctrl[c]] =
                            (k + d - shift[c]) % d;

                    // then the complement part (equal for column)
                    for (idx c = 0; c < n_subsys_bar; ++c)
                        midx_row[Csubsys_bar[c]] = midx_col[Csubsys_bar[c]] =
                            midx_bar[c];

                    // then the target part
                    for (idx c = 0; c < n_gate; ++c)
                        midx_row[target[c]] = midxA_row[c];

                    // run over the target column multi-index
                    for (idx b = 0; b < DA; ++b) {
                        // get the target column multi-index
                        internal::n2multiidx(b, n_gate, CdimsA, midxA_col);

                        // construct the result column multi-index
                        for (idx c = 0; c < n_gate; ++c)
                            midx_col[target[c]] = midxA_col[c];

                        // finally write the values
                        result(internal::multiidx2n(midx_row, n, Cdims),
                               internal::multiidx2n(midx_col, n, Cdims)) =
                            Ak(a, b);
                    }
                }
            }
        }

        return result;
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system. Faster than
     * using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \param A Eigen expression
     * \param pos Position
     * \param dims Dimensions of the multi-partite system
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes
     * I\f$, with \a A on position \a pos, as a dynamic matrix over the same
     * scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos,
              const std::vector<idx>& dims) const {
        const dyn_mat<typename Derived::Scalar>& rA = A.derived();

        // EXCEPTION CHECKS

        // check zero-size
        if (!internal::check_nonzero_size(rA))
            throw exception::ZeroSize("qpp::Gates::expandout()");

        // check that dims is a valid dimension vector
        if (!internal::check_dims(dims))
            throw exception::DimsInvalid("qpp::Gates::expandout()");

        // check square matrix
        if (!internal::check_square_mat(rA))
            throw exception::MatrixNotSquare("qpp::Gates::expandout()");

        // check that position is valid
        if (pos + 1 > dims.size())
            throw exception::OutOfRange("qpp::Gates::expandout()");

        // check that dims[pos] match the dimension of A
        if (static_cast<idx>(rA.rows()) != dims[pos])
            throw exception::DimsMismatchMatrix("qpp::Gates::expandout()");
        // END EXCEPTION CHECKS

        idx D = std::accumulate(std::begin(dims), std::end(dims),
                                static_cast<idx>(1), std::multiplies<idx>());
        dyn_mat<typename Derived::Scalar> result =
            dyn_mat<typename Derived::Scalar>::Identity(D, D);

        idx Cdims[maxn];
        idx midx_row[maxn];
        idx midx_col[maxn];

        for (idx k = 0; k < dims.size(); ++k) {
            midx_row[k] = midx_col[k] = 0;
            Cdims[k] = dims[k];
        }

        // run over the main diagonal multi-indexes
        for (idx i = 0; i < D; ++i) {
            // get row multi_index
            internal::n2multiidx(i, dims.size(), Cdims, midx_row);
            // get column multi_index (same as row)
            internal::n2multiidx(i, dims.size(), Cdims, midx_col);
            // run over the gate row multi-index
            for (idx a = 0; a < static_cast<idx>(rA.rows()); ++a) {
                // construct the total row multi-index
                midx_row[pos] = a;
                // run over the gate column multi-index
                for (idx b = 0; b < static_cast<idx>(rA.cols()); ++b) {
                    // construct the total column multi-index
                    midx_col[pos] = b;

                    // finally write the values
                    result(internal::multiidx2n(midx_row, dims.size(), Cdims),
                           internal::multiidx2n(midx_col, dims.size(),
                                                Cdims)) = rA(a, b);
                }
            }
        }

        return result;
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system. Faster than
     * using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \note The std::initializer_list overload exists because otherwise, in
     * the degenerate case when \a dims has only one element, the one element
     * list is implicitly converted to the element's underlying type, i.e.
     * qpp::idx, which has the net effect of picking the wrong (non-vector)
     * qpp::expandout() overload
     *
     * \param A Eigen expression
     * \param pos Position
     * \param dims Dimensions of the multi-partite system
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes
     * I\f$, with \a A on position \a pos, as a dynamic matrix over the same
     * scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos,
              const std::initializer_list<idx>& dims) const {
        return expandout(A, pos, std::vector<idx>(dims));
    }

    /**
     * \brief Expands out
     * \see qpp::kron()
     *
     * Expands out \a A as a matrix in a multi-partite system. Faster than
     * using qpp::kron(I, I, ..., I, A, I, ..., I).
     *
     * \param A Eigen expression
     * \param pos Position
     * \param n Number of subsystems
     * \param d Subsystem dimensions
     * \return Tensor product
     * \f$ I\otimes\cdots\otimes I\otimes A \otimes I \otimes\cdots\otimes
     * I\f$, with \a A on position \a pos, as a dynamic matrix over the same
     * scalar field as \a A
     */
    template <typename Derived>
    dyn_mat<typename Derived::Scalar>
    expandout(const Eigen::MatrixBase<Derived>& A, idx pos, idx n,
              idx d = 2) const {
        // EXCEPTION CHECKS

        // check zero size
        if (!internal::check_nonzero_size(A))
            throw exception::ZeroSize("qpp::Gates::expandout()");

        // check valid dims
        if (d == 0)
            throw exception::DimsInvalid("qpp::Gates::expandout()");
        // END EXCEPTION CHECKS

        std::vector<idx> dims(n, d); // local dimensions vector

        return expandout(A, pos, dims);
    }

    // getters

    /**
     * \brief Get the name of the most common qubit gates
     *
     * \note Assumes that the gate \a U is represented by a square matrix. If
     * not, returns the empty string
     *
     * \param U Complex matrix representing the quantum gate
     * \return Name of the gate (if any), otherwise the empty string
     */
    std::string get_name(const cmat& U) const {
        // EXCEPTION CHECKS

        // check zero size
        if (!internal::check_nonzero_size(U))
            throw exception::ZeroSize("qpp::Gates::get_name()");

        // check square matrix
        if (!internal::check_square_mat(U))
            return "";
        // END EXCEPTION CHECKS

        const idx D = static_cast<idx>(U.rows());

        // NOTE: comparisons are exact (element-wise ==), so only matrices
        // built from the same constants above will match
        switch (D) {
            // 1 qubit gates
            case 2:
                if (U == Id2)
                    return "Id2";
                else if (U == H)
                    return "H";
                else if (U == X)
                    return "X";
                else if (U == Y)
                    return "Y";
                else if (U == Z)
                    return "Z";
                else if (U == S)
                    return "S";
                else if (U == T)
                    return "T";
                else
                    return "";
                break;
            // 2 qubit gates
            case 4:
                if (U == CNOT)
                    return "CNOT";
                else if (U == CZ)
                    return "CZ";
                else if (U == CNOTba)
                    return "CNOTba";
                else if (U == SWAP)
                    return "SWAP";
                else
                    return "";
                break;
            // 3 qubit gates
            case 8:
                if (U == TOF)
                    return "TOF";
                else if (U == FRED)
                    return "FRED";
                else
                    return "";
                break;
            default:
                return "";
        }
    }
    // end getters
}; /* class Gates */

} /* namespace qpp */

#endif /* CLASSES_GATES_H_ */
GB_unaryop__abs_fp32_int16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_fp32_int16
// op(A') function:  GB_tran__abs_fp32_int16

// C type:   float
// A type:   int16_t
// cast:     float cij = (float) aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    int16_t

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
// NOTE: reads Ax [pA] into a local before writing Cx [pC], which is what
// makes the aliased case (Cx == Ax) below safe for pC == pA.
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;  \
    GB_OP (GB_CX (pC), z) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32 || GxB_NO_INT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_fp32_int16
(
    float *Cx,       // Cx and Ax may be aliased
    int16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_fp32_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body lives in the shared template; the macros
    // above specialize it for this type/op combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
testing_dgemm.c
/** * * @file testing_dgemm.c * * PLASMA testing routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Emmanuel Agullo * @author Mathieu Faverge * @date 2010-11-15 * @generated d Tue Jan 7 11:45:18 2014 * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <plasma.h> #include <cblas.h> #include <lapacke.h> #include <core_blas.h> #include "testing_dmain.h" #undef COMPLEX #define REAL static int check_solution(PLASMA_enum transA, PLASMA_enum transB, int M, int N, int K, double alpha, double *A, int LDA, double *B, int LDB, double beta, double *Cref, double *Cplasma, int LDC); int testing_dgemm(int argc, char **argv) { /* Check for number of arguments*/ if ( argc != 8) { USAGE("GEMM", "alpha beta M N K LDA LDB LDC", " - alpha : alpha coefficient\n" " - beta : beta coefficient\n" " - M : number of rows of matrices A and C\n" " - N : number of columns of matrices B and C\n" " - K : number of columns of matrix A / number of rows of matrix B\n" " - LDA : leading dimension of matrix A\n" " - LDB : leading dimension of matrix B\n" " - LDC : leading dimension of matrix C\n"); return -1; } PLASMA_Set(PLASMA_TILE_SIZE, 128); double alpha = (double) atol(argv[0]); double beta = (double) atol(argv[1]); int M = atoi(argv[2]); int N = atoi(argv[3]); int K = atoi(argv[4]); int LDA = atoi(argv[5]); int LDB = atoi(argv[6]); int LDC = atoi(argv[7]); double eps; int info_solution; int i, j, ta, tb; int LDAxK = LDA*max(M,K); int LDBxN = LDB*max(K,N); int LDCxN = LDC*N; double *A = (double *)malloc(LDAxK*sizeof(double)); #pragma omp register( [LDAxK]A ) double *B = (double *)malloc(LDBxN*sizeof(double)); #pragma omp register( [LDBxN]B ) double *C = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register( [LDCxN]C ) double *Cinit = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register( [LDCxN]Cinit ) double *Cfinal = (double 
*)malloc(LDCxN*sizeof(double)); #pragma omp register( [LDCxN]Cfinal ) /* Check if unable to allocate memory */ if ((!A)||(!B)||(!Cinit)||(!Cfinal)){ printf("Out of Memory \n "); return -2; } eps = LAPACKE_dlamch_work('e'); printf("\n"); printf("------ TESTS FOR PLASMA DGEMM ROUTINE ------- \n"); printf(" Size of the Matrix %d by %d\n", M, N); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 10.\n"); /*---------------------------------------------------------- * TESTING DGEMM */ /* Initialize A, B, C */ LAPACKE_dlarnv_work(IONE, ISEED, LDAxK, A); LAPACKE_dlarnv_work(IONE, ISEED, LDBxN, B); LAPACKE_dlarnv_work(IONE, ISEED, LDCxN, C); #ifdef COMPLEX for (ta=0; ta<3; ta++) { for (tb=0; tb<3; tb++) { #else for (ta=0; ta<2; ta++) { for (tb=0; tb<2; tb++) { #endif for ( i = 0; i < M; i++) for ( j = 0; j < N; j++) Cinit[LDC*j+i] = C[LDC*j+i]; for ( i = 0; i < M; i++) for ( j = 0; j < N; j++) Cfinal[LDC*j+i] = C[LDC*j+i]; /* PLASMA DGEMM */ PLASMA_dgemm(trans[ta], trans[tb], M, N, K, alpha, A, LDA, B, LDB, beta, Cfinal, LDC); /* Check the solution */ info_solution = check_solution(trans[ta], trans[tb], M, N, K, alpha, A, LDA, B, LDB, beta, Cinit, Cfinal, LDC); if (info_solution == 0) { printf("***************************************************\n"); printf(" ---- TESTING DGEMM (%s, %s) ............... PASSED !\n", transstr[ta], transstr[tb]); printf("***************************************************\n"); } else { printf("************************************************\n"); printf(" - TESTING DGEMM (%s, %s) ... 
FAILED !\n", transstr[ta], transstr[tb]); printf("************************************************\n"); } } } #ifdef _UNUSED_ }} #endif free(A); free(B); free(C); free(Cinit); free(Cfinal); return 0; } /*-------------------------------------------------------------- * Check the solution */ static int check_solution(PLASMA_enum transA, PLASMA_enum transB, int M, int N, int K, double alpha, double *A, int LDA, double *B, int LDB, double beta, double *Cref, double *Cplasma, int LDC) { int info_solution; double Anorm, Bnorm, Cinitnorm, Cplasmanorm, Clapacknorm, Rnorm, result; double eps; double beta_const; double *work = (double *)malloc(max(K,max(M, N))* sizeof(double)); int Am, An, Bm, Bn; beta_const = -1.0; if (transA == PlasmaNoTrans) { Am = M; An = K; } else { Am = K; An = M; } if (transB == PlasmaNoTrans) { Bm = K; Bn = N; } else { Bm = N; Bn = K; } Anorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), Am, An, A, LDA, work); Bnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), Bm, Bn, B, LDB, work); Cinitnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Cref, LDC, work); Cplasmanorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Cplasma, LDC, work); cblas_dgemm(CblasColMajor, (CBLAS_TRANSPOSE)transA, (CBLAS_TRANSPOSE)transB, M, N, K, (alpha), A, LDA, B, LDB, (beta), Cref, LDC); Clapacknorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Cref, LDC, work); cblas_daxpy(LDC * N, (beta_const), Cplasma, 1, Cref, 1); Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), M, N, Cref, LDC, work); eps = LAPACKE_dlamch_work('e'); printf("Rnorm %e, Anorm %e, Bnorm %e, Cinitnorm %e, Cplasmanorm %e, Clapacknorm %e\n", Rnorm, Anorm, Bnorm, Cinitnorm, Cplasmanorm, Clapacknorm); result = Rnorm / ((Anorm + Bnorm + Cinitnorm) * N * eps); printf("============\n"); printf("Checking the norm of the difference against reference DGEMM \n"); 
printf("-- ||Cplasma - Clapack||_oo/((||A||_oo+||B||_oo+||C||_oo).N.eps) = %e \n", result); if ( isnan(Rnorm) || isinf(Rnorm) || isnan(result) || isinf(result) || (result > 10.0) ) { printf("-- The solution is suspicious ! \n"); info_solution = 1; } else { printf("-- The solution is CORRECT ! \n"); info_solution= 0 ; } free(work); return info_solution; } int timing_dgemm(int argc, char **argv) { int transa; int transb; int m, n, k; double alpha, beta; int lda, ldb, ldc; double *A, *B, *C; double *C0; int bs, rep; int i; double start, end; double elapsed; FILE *log; int num_threads; //gemm transA, transB, M, N, K, alpha, A, lda, B, ldb, beta, C, ldc if (argc != 12) { fprintf(stderr, "GEMMs transa transb m n k alpha lda ldb beta ldc bs rep\n"); return 1;; } sscanf(argv[0], "%d", &transa); sscanf(argv[1], "%d", &transb); if ( !transa ) transa = PlasmaNoTrans; else transa = PlasmaTrans; if ( !transb ) transb = PlasmaNoTrans; else transb = PlasmaTrans; sscanf(argv[2], "%d", &m); sscanf(argv[3], "%d", &n); sscanf(argv[4], "%d", &k); sscanf(argv[5], "%lf", &alpha); sscanf(argv[6], "%d", &lda); sscanf(argv[7], "%d", &ldb); sscanf(argv[8], "%lf", &beta); sscanf(argv[9], "%d", &ldc); sscanf(argv[10], "%d", &bs); sscanf(argv[11], "%d", &rep); int dimA = max(m,k) * lda; int dimB = max(n,k) * ldb; int dimC = max(m,n) * ldc; A = malloc( dimA * sizeof(double)); #pragma omp register ([dimA]A) B = malloc( dimB * sizeof(double)); #pragma omp register ([dimB]B) C = malloc( dimC * sizeof(double)); #pragma omp register ([dimC]C) LAPACKE_dlarnv(IONE, ISEED, dimA, A); LAPACKE_dlarnv(IONE, ISEED, dimB, B); LAPACKE_dlarnv(IONE, ISEED, dimC, C); PLASMA_Set( PLASMA_TILE_SIZE, bs); elapsed = 0.0; for ( i = 0; i < rep; i++ ) { start = gtime(); PLASMA_dgemm(transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); end = gtime(); elapsed += end - start; } num_threads = omp_get_max_threads(); dump_info("plasma_dgemm.log", num_threads, elapsed, rep); free(A); free(B); free(C); return 0; }
GB_binop__eq_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_int64)
// A*D function (colscale):         GB (_AxD__eq_int64)
// D*A function (rowscale):         GB (_DxB__eq_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_int64)
// C=scalar+B                       GB (_bind1st__eq_int64)
// C=scalar+B'                      GB (_bind1st_tran__eq_int64)
// C=A+scalar                       GB (_bind2nd__eq_int64)
// C=A'+scalar                      GB (_bind2nd_tran__eq_int64)

// C type:   bool
// A type:   int64_t
// A pattern? 0
// B type:   int64_t
// B pattern? 0

// BinaryOp: cij = (aij == bij)

// NOTE(review): the macros below are consumed by the *_template.c files
// textually #included into each kernel; they define the EQ_INT64 operator
// (int64_t inputs, bool output) for the generic templates.

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso)  \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// NOTE(review): the trailing backslash after the 0 continues this macro onto
// the (blank) next line; generator artifact, harmless but easy to misread.
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso)  \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t)  \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (each kernel body below collapses to "return (GrB_NO_VALUE)" when disabled)
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): the "#if 0" means this accum kernel is intentionally
    // stubbed out for EQ (not a MIN/MAX/PLUS/... accumulator op).
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (EQ is commutative, so GB_BINOP_FLIP is 0 and this branch is taken.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    // (temporarily redefined around the template include, restored below)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
levelset_convection_process.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \.
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Ruben Zorrilla
//

#if !defined(KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED )
#define KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED

// System includes
#include <string>
#include <iostream>
#include <algorithm>

// External includes

// Project includes
#include "includes/convection_diffusion_settings.h"
#include "includes/define.h"
#include "includes/kratos_flags.h"
#include "elements/levelset_convection_element_simplex.h"
#include "geometries/geometry_data.h"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "solving_strategies/strategies/residualbased_linear_strategy.h"
#include "utilities/variable_utils.h"

namespace Kratos
{
///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/// Short class definition.
/**takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and convects a level set distance
 * on the top of it
 */
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
class LevelSetConvectionProcess
    : public Process
{
public:

    KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1);
    KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS);

    ///@name Type Definitions
    ///@{

    typedef Scheme< TSparseSpace,  TDenseSpace > SchemeType;
    typedef SolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType;

    ///@}
    ///@name Pointer Definitions
    ///@{

    /// Pointer definition of LevelSetConvectionProcess
    KRATOS_CLASS_POINTER_DEFINITION(LevelSetConvectionProcess);

    ///@}
    ///@name Life Cycle
    ///@{

    /**
     * Constructor with a linear solver. Validates the model part (simplicial
     * elements, required nodal variables), sets up the process info defaults
     * (DYNAMIC_TAU, CONVECTION_DIFFUSION_SETTINGS), builds the auxiliary
     * convection model part and the linear solving strategy.
     * @param rLevelSetVar nodal scalar variable to be convected
     * @param rBaseModelPart model part holding the simplicial mesh
     * @param plinear_solver linear solver used by the internal strategy
     * @param max_cfl target maximum CFL used to choose the substep count
     * @param cross_wind_stabilization_factor stabilization factor stored in the process info
     * @param max_substeps upper bound for the substeps (0 means no limit)
     */
    LevelSetConvectionProcess(
        Variable<double>& rLevelSetVar,
        ModelPart& rBaseModelPart,
        typename TLinearSolver::Pointer plinear_solver,
        const double max_cfl = 1.0,
        const double cross_wind_stabilization_factor = 0.7,
        const unsigned int max_substeps = 0)
        : mrBaseModelPart(rBaseModelPart),
          mrLevelSetVar(rLevelSetVar),
          mMaxAllowedCFL(max_cfl),
          mMaxSubsteps(max_substeps)
    {
        KRATOS_TRY

        // Check that there is at least one element and node in the model
        const auto n_nodes = rBaseModelPart.NumberOfNodes();
        const auto n_elems = rBaseModelPart.NumberOfElements();

        KRATOS_ERROR_IF(n_nodes == 0) << "The model has no nodes." << std::endl;
        KRATOS_ERROR_IF(n_elems == 0) << "The model has no elements." << std::endl;

        VariableUtils().CheckVariableExists< Variable< double > >(rLevelSetVar, rBaseModelPart.Nodes());
        VariableUtils().CheckVariableExists< Variable< array_1d < double, 3 > > >(VELOCITY, rBaseModelPart.Nodes());

        // NOTE(review): only the first element's geometry family is checked;
        // a mixed mesh would pass this check — presumably meshes are uniform.
        if(TDim == 2){
            KRATOS_ERROR_IF(rBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily() != GeometryData::Kratos_Triangle) <<
                "In 2D the element type is expected to be a triangle" << std::endl;
        } else if(TDim == 3) {
            KRATOS_ERROR_IF(rBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily() != GeometryData::Kratos_Tetrahedra) <<
                "In 3D the element type is expected to be a tetrahedra" << std::endl;
        }

        // Allocate if needed the variable DYNAMIC_TAU of the process info, and if it does not exist, set it to zero
        if( rBaseModelPart.GetProcessInfo().Has(DYNAMIC_TAU) == false){
            rBaseModelPart.GetProcessInfo().SetValue(DYNAMIC_TAU,0.0);
        }

        // Allocate if needed the variable CONVECTION_DIFFUSION_SETTINGS of the process info, and create it if it does not exist
        if( rBaseModelPart.GetProcessInfo().Has(CONVECTION_DIFFUSION_SETTINGS) == false){
            ConvectionDiffusionSettings::Pointer p_conv_diff_settings = Kratos::make_unique<ConvectionDiffusionSettings>();
            rBaseModelPart.GetProcessInfo().SetValue(CONVECTION_DIFFUSION_SETTINGS, p_conv_diff_settings);
            p_conv_diff_settings->SetUnknownVariable(rLevelSetVar);
            p_conv_diff_settings->SetConvectionVariable(VELOCITY);
        }

        // Generate an auxilary model part and populate it by elements of type DistanceCalculationElementSimplex
        mDistancePartIsInitialized = false;
        ReGenerateConvectionModelPart(rBaseModelPart);

        // Generate a linear strategy
        typename SchemeType::Pointer pscheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >();
        typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverTypePointer;

        bool CalculateReactions = false;
        bool ReformDofAtEachIteration = false;
        bool CalculateNormDxFlag = false;

        BuilderSolverTypePointer pBuilderSolver = Kratos::make_shared< ResidualBasedBlockBuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver > >(plinear_solver);
        mpSolvingStrategy = Kratos::make_unique< ResidualBasedLinearStrategy<TSparseSpace,TDenseSpace,TLinearSolver > >(
            *mpDistanceModelPart,
            pscheme,
            plinear_solver,
            pBuilderSolver,
            CalculateReactions,
            ReformDofAtEachIteration,
            CalculateNormDxFlag);

        mpSolvingStrategy->SetEchoLevel(0);

        rBaseModelPart.GetProcessInfo().SetValue(CROSS_WIND_STABILIZATION_FACTOR, cross_wind_stabilization_factor);

        //TODO: check flag DO_EXPENSIVE_CHECKS
        mpSolvingStrategy->Check();

        KRATOS_CATCH("")
    }

    /// Destructor. Removes the auxiliary convection model part from the model.
    ~LevelSetConvectionProcess() override
    {
        mrBaseModelPart.GetModel().DeleteModelPart("DistanceConvectionPart");
    }

    ///@}
    ///@name Operators
    ///@{

    // Functor interface: forwards to Execute().
    void operator()(){
        Execute();
    }

    ///@}
    ///@name Operations
    ///@{

    /**
     * Convects the level set variable over one time step (DELTA_TIME of the
     * base model part), internally splitting it into substeps so the max CFL
     * stays near mMaxAllowedCFL. Velocities and the old level set values are
     * saved before, linearly interpolated per substep, and restored after.
     */
    void Execute() override
    {
        KRATOS_TRY;

        if(mDistancePartIsInitialized == false){
            ReGenerateConvectionModelPart(mrBaseModelPart);
        }

        // Evaluate steps needed to achieve target max_cfl
        const auto n_substep = EvaluateNumberOfSubsteps();

        // Save the variables to be employed so that they can be restored after the solution
        ProcessInfo& rCurrentProcessInfo = mpDistanceModelPart->GetProcessInfo();
        const auto & r_previous_var = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->GetUnknownVariable();
        const double previous_delta_time = rCurrentProcessInfo.GetValue(DELTA_TIME);

        // Save current level set value and current and previous step velocity values
        #pragma omp parallel for
        for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
            const auto it_node = mpDistanceModelPart->NodesBegin() + i_node;
            mVelocity[i_node] = it_node->FastGetSolutionStepValue(VELOCITY);
            mVelocityOld[i_node] = it_node->FastGetSolutionStepValue(VELOCITY,1);
            mOldDistance[i_node] = it_node->FastGetSolutionStepValue(mrLevelSetVar,1);
        }

        const double dt = previous_delta_time / static_cast<double>(n_substep);
        rCurrentProcessInfo.SetValue(DELTA_TIME, dt);
        rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->SetUnknownVariable(mrLevelSetVar);

        const int rank = mrBaseModelPart.GetCommunicator().MyPID();

        for(unsigned int step = 1; step <= n_substep; ++step){

            KRATOS_INFO_IF("LevelSetConvectionProcess", mpSolvingStrategy->GetEchoLevel() > 0 && rank == 0) <<
                "Doing step "<< step << " of " << n_substep << std::endl;

            // Compute shape functions of old and new step
            // (linear interpolation weights of the saved old/new velocities)
            const double Nold = 1.0 - static_cast<double>(step) / static_cast<double>(n_substep);
            const double Nnew = 1.0 - Nold;

            const double Nold_before = 1.0 - static_cast<double>(step-1) / static_cast<double>(n_substep);
            const double Nnew_before = 1.0 - Nold_before;

            // Emulate clone time step by copying the new distance onto the old one
            #pragma omp parallel for
            for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
                auto it_node = mpDistanceModelPart->NodesBegin() + i_node;

                const array_1d<double,3>& v = mVelocity[i_node];
                const array_1d<double,3>& v_old = mVelocityOld[i_node];

                it_node->FastGetSolutionStepValue(VELOCITY) = Nold * v_old + Nnew * v;
                it_node->FastGetSolutionStepValue(VELOCITY, 1) = Nold_before * v_old + Nnew_before * v;
                it_node->FastGetSolutionStepValue(mrLevelSetVar, 1) = it_node->FastGetSolutionStepValue(mrLevelSetVar);
            }

            mpSolvingStrategy->Solve();
        }

        // Reset the processinfo to the original settings
        rCurrentProcessInfo.SetValue(DELTA_TIME, previous_delta_time);
        rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS)->SetUnknownVariable(r_previous_var);

        // Reset the velocities and levelset values to the one saved before the solution process
        #pragma omp parallel for
        for (int i_node = 0; i_node < static_cast<int>(mpDistanceModelPart->NumberOfNodes()); ++i_node){
            auto it_node = mpDistanceModelPart->NodesBegin() + i_node;
            it_node->FastGetSolutionStepValue(VELOCITY) = mVelocity[i_node];
            it_node->FastGetSolutionStepValue(VELOCITY,1) = mVelocityOld[i_node];
            it_node->FastGetSolutionStepValue(mrLevelSetVar,1) = mOldDistance[i_node];
        }

        KRATOS_CATCH("")
    }

    // Releases the auxiliary model part content and the internal buffers,
    // forcing a regeneration on the next Execute().
    virtual void Clear(){
        mpDistanceModelPart->Nodes().clear();
        mpDistanceModelPart->Conditions().clear();
        mpDistanceModelPart->Elements().clear();
        // mpDistanceModelPart->GetProcessInfo().clear();
        mDistancePartIsInitialized = false;

        mpSolvingStrategy->Clear();

        mVelocity.clear();
        mVelocityOld.clear();
        mOldDistance.clear();
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    std::string Info() const override {
        return "LevelSetConvectionProcess";
    }

    /// Print information about this object.
    void PrintInfo(std::ostream& rOStream) const override {
        rOStream << "LevelSetConvectionProcess";
    }

    /// Print object's data.
    void PrintData(std::ostream& rOStream) const override {
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}
protected:
    ///@name Protected static Member Variables
    ///@{

    ///@}
    ///@name Protected member Variables
    ///@{

    ModelPart& mrBaseModelPart;                 // mesh the level set lives on
    ModelPart* mpDistanceModelPart;             // auxiliary part owned by the Model ("DistanceConvectionPart")

    Variable<double>& mrLevelSetVar;            // convected scalar variable

    const double mMaxAllowedCFL;                // target CFL per substep

    bool mDistancePartIsInitialized;            // guards lazy (re)generation of mpDistanceModelPart

    const unsigned int mMaxSubsteps;            // 0 = unlimited

    // Buffers used to save/restore nodal values around Execute()
    std::vector< double > mOldDistance;
    std::vector< array_1d<double,3> > mVelocity, mVelocityOld;

    typename SolvingStrategyType::UniquePointer mpSolvingStrategy;

    ///@}
    ///@name Protected Operators
    ///@{

    ///@}
    ///@name Protected Operations
    ///@{

    /// Constructor without linear solver for derived classes
    LevelSetConvectionProcess(
        Variable<double> &rLevelSetVar,
        ModelPart &rBaseModelPart,
        const double MaxCFL = 1.0,
        const unsigned int MaxSubSteps = 0)
        : mrBaseModelPart(rBaseModelPart),
          mrLevelSetVar(rLevelSetVar),
          mMaxAllowedCFL(MaxCFL),
          mMaxSubsteps(MaxSubSteps)
    {
        mDistancePartIsInitialized = false;
    }

    /**
     * (Re)builds the auxiliary "DistanceConvectionPart": shares the base part's
     * nodes, process info, properties and tables, and creates one
     * LevelSetConvectionElementSimplex per base element reusing the SAME
     * geometry instance. Also resizes the save/restore buffers.
     */
    virtual void ReGenerateConvectionModelPart(ModelPart& rBaseModelPart){

        KRATOS_TRY

        Model& current_model = rBaseModelPart.GetModel();

        if(current_model.HasModelPart("DistanceConvectionPart"))
            current_model.DeleteModelPart("DistanceConvectionPart");

        mpDistanceModelPart= &(current_model.CreateModelPart("DistanceConvectionPart"));

        // Check buffer size
        const auto base_buffer_size = rBaseModelPart.GetBufferSize();
        KRATOS_ERROR_IF(base_buffer_size < 2) <<
            "Base model part buffer size is " << base_buffer_size << ". Set it to a minimum value of 2." << std::endl;

        // Generate
        mpDistanceModelPart->Nodes().clear();
        mpDistanceModelPart->Conditions().clear();
        mpDistanceModelPart->Elements().clear();

        mpDistanceModelPart->SetProcessInfo(rBaseModelPart.pGetProcessInfo());
        mpDistanceModelPart->SetBufferSize(base_buffer_size);
        mpDistanceModelPart->SetProperties(rBaseModelPart.pProperties());
        mpDistanceModelPart->Tables() = rBaseModelPart.Tables();

        // Assigning the nodes to the new model part
        mpDistanceModelPart->Nodes() = rBaseModelPart.Nodes();

        // Ensure that the nodes have distance as a DOF
        VariableUtils().AddDof< Variable < double> >(mrLevelSetVar, rBaseModelPart);

        // Generating the elements
        mpDistanceModelPart->Elements().reserve(rBaseModelPart.NumberOfElements());
        for (auto it_elem = rBaseModelPart.ElementsBegin(); it_elem != rBaseModelPart.ElementsEnd(); ++it_elem){
            Element::Pointer p_element = Kratos::make_shared< LevelSetConvectionElementSimplex < TDim, TDim+1 > >(
                it_elem->Id(),
                it_elem->pGetGeometry(),
                it_elem->pGetProperties());

            // Assign EXACTLY THE SAME GEOMETRY, so that memory is saved!!
            p_element->pGetGeometry() = it_elem->pGetGeometry();

            mpDistanceModelPart->Elements().push_back(p_element);
        }

        // Next is for mpi (but mpi would also imply calling an mpi strategy)
        Communicator::Pointer pComm = rBaseModelPart.GetCommunicator().Create();
        mpDistanceModelPart->SetCommunicator(pComm);

        // Resize the arrays
        const auto n_nodes = mpDistanceModelPart->NumberOfNodes();
        mVelocity.resize(n_nodes);
        mVelocityOld.resize(n_nodes);
        mOldDistance.resize(n_nodes);

        mDistancePartIsInitialized = true;

        KRATOS_CATCH("")
    }

    /**
     * Computes the elementwise maximum CFL (synchronized over MPI ranks) and
     * returns the substep count needed to keep it at/below mMaxAllowedCFL,
     * clamped to [1, mMaxSubsteps] when mMaxSubsteps > 0.
     */
    unsigned int EvaluateNumberOfSubsteps(){
        // First of all compute the cfl number
        const auto n_elem = mpDistanceModelPart->NumberOfElements();
        const double dt = mpDistanceModelPart->GetProcessInfo()[DELTA_TIME];

        // Vector where each thread will store its maximum (VS does not support OpenMP reduce max)
        int NumThreads = OpenMPUtils::GetNumThreads();
        std::vector<double> list_of_max_local_cfl(NumThreads, 0.0);

        //TODO: Update this loop to avoid using thread id
        // NOTE(review): this is "#pragma omp parallel" without a "for"
        // worksharing clause, so every thread appears to traverse ALL
        // elements (the per-thread max slots then all hold the global
        // max). Result is still correct but redundant — confirm whether
        // "parallel for" was intended.
        #pragma omp parallel shared(list_of_max_local_cfl)
        for(int i_elem = 0; i_elem < static_cast<int>(n_elem); i_elem++){
            const auto it_elem = mpDistanceModelPart->ElementsBegin() + i_elem;
            Geometry< Node<3> >& r_geom = it_elem->GetGeometry();

            double vol;
            array_1d<double, TDim+1 > N;
            BoundedMatrix<double, TDim+1, TDim > DN_DX;
            GeometryUtils::CalculateGeometryData(r_geom, DN_DX, N, vol);

            int k = OpenMPUtils::ThisThread();
            double& max_cfl = list_of_max_local_cfl[k];

            // Compute h
            // NOTE(review): the inner loop index "k" shadows the thread id
            // "k" above; also h_inv could be zero for a degenerate element
            // (division below) — worth confirming upstream guarantees.
            double h=0.0;
            for(unsigned int i=0; i<TDim+1; i++){
                double h_inv = 0.0;
                for(unsigned int k=0; k<TDim; k++){
                    h_inv += DN_DX(i,k)*DN_DX(i,k);
                }
                h += 1.0/h_inv;
            }
            h = sqrt(h)/static_cast<double>(TDim+1);

            // Get average velocity at the nodes
            array_1d<double, 3 > vgauss = ZeroVector(3);
            for(unsigned int i=0; i<TDim+1; i++){
                vgauss += N[i]* r_geom[i].FastGetSolutionStepValue(VELOCITY);
            }

            double cfl_local = norm_2(vgauss) / h;
            if(cfl_local > max_cfl){
                max_cfl = cfl_local;
            }
        }

        // Now we get the maximum at each thread level
        double max_cfl_found = 0.0;
        for (int k=0; k < NumThreads;k++){
            if (max_cfl_found < list_of_max_local_cfl[k]){
                max_cfl_found = list_of_max_local_cfl[k];
            }
        }
        max_cfl_found *= dt;

        // Synchronize maximum CFL between processes
        mpDistanceModelPart->GetCommunicator().MaxAll(max_cfl_found);

        unsigned int n_steps = static_cast<unsigned int>(max_cfl_found / mMaxAllowedCFL);
        if(n_steps < 1){
            n_steps = 1;
        }

        // Now we compare with the maximum set
        if (mMaxSubsteps > 0 && mMaxSubsteps < n_steps){
            n_steps = mMaxSubsteps;
        }

        return n_steps;
    }

    ///@}
    ///@name Protected  Access
    ///@{

    ///@}
    ///@name Protected Inquiry
    ///@{

    ///@}
    ///@name Protected LifeCycle
    ///@{

    ///@}
private:
    ///@name Static Member Variables
    ///@{

    ///@}
    ///@name Member Variables
    ///@{

    ///@}
    ///@name Private Operators
    ///@{

    ///@}
    ///@name Private Operations
    ///@{

    ///@}
    ///@name Private  Access
    ///@{

    ///@}
    ///@name Private Inquiry
    ///@{

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    LevelSetConvectionProcess& operator=(LevelSetConvectionProcess const& rOther);

    /// Copy constructor.
    //LevelSetConvectionProcess(LevelSetConvectionProcess const& rOther);

    ///@}
}; // Class LevelSetConvectionProcess

// Avoiding using the macro since this has a template parameter.
// If there was no template please use the KRATOS_CREATE_LOCAL_FLAG macro
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0));

template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver >
const Kratos::Flags LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1));

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// Input stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::istream& operator >> (
    std::istream& rIStream,
    LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>& rThis);

/// Output stream function
template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver>
inline std::ostream& operator << (
    std::ostream& rOStream,
    const LevelSetConvectionProcess<TDim, TSparseSpace, TDenseSpace, TLinearSolver>& rThis){

    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

}  // namespace Kratos.

#endif // KRATOS_LEVELSET_CONVECTION_PROCESS_INCLUDED  defined
bml_norm_ellsort_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_norm.h"
#include "../bml_parallel.h"
#include "../bml_types.h"
#include "bml_norm_ellsort.h"
#include "bml_types_ellsort.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Calculate the sum of squares of the elements of a matrix.
 *
 * Only the rows in [A_localRowMin[myRank], A_localRowMax[myRank]) are
 * visited, i.e. the rows this rank owns in a domain-decomposed run.
 * The accumulation is done in REAL_T; only the real part of the final
 * sum is returned.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The sum of squares of A
 */
double TYPED_FUNC(
    bml_sum_squares_ellsort) (
    bml_matrix_ellsort_t * A)
{
    int N = A->N;
    int M = A->M;
    int *A_nnz = (int *) A->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;

    int myRank = bml_getMyRank();

    /* Each thread accumulates a private sum; OpenMP combines them at the
     * end of the loop via the reduction clause. */
#pragma omp parallel for \
    shared(N, M, A_value, A_nnz) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    reduction(+:sum)
    //for (int i = 0; i < N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
        /* Only the first A_nnz[i] slots of row i hold valid entries. */
        for (int j = 0; j < A_nnz[i]; j++)
        {
            REAL_T xval = A_value[ROWMAJOR(i, j, N, M)];
            sum += xval * xval;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of squares of all the core elements of a submatrix.
 *
 * Visits rows [0, core_size) and, within each row, only the entries whose
 * column index is also below core_size.
 *
 * \ingroup norm_group
 *
 * \param A The matrix
 * \param core_size Number of core rows
 * \return The sum of squares of A
 */
double TYPED_FUNC(
    bml_sum_squares_submatrix_ellsort) (
    bml_matrix_ellsort_t * A,
    int core_size)
{
    int N = A->N;
    int M = A->M;
    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;

#pragma omp parallel for \
    shared(N, M, A_index, A_nnz, A_value) \
    reduction(+:sum)
    for (int i = 0; i < core_size; i++)
    {
        for (int j = 0; j < A_nnz[i]; j++)
        {
            /* Restrict to columns inside the core block. */
            if (A_index[ROWMAJOR(i, j, N, M)] < core_size)
            {
                REAL_T value = A_value[ROWMAJOR(i, j, N, M)];
                sum += value * value;
            }
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the sum of squares of the elements of \alpha A + \beta B.
 *
 * For each local row the combination alpha*A + beta*B is gathered into a
 * dense scratch row y[]; entries whose magnitude exceeds the threshold
 * contribute to the sum.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \param B The matrix B
 * \param alpha Multiplier for A
 * \param beta Multiplier for B
 * \param threshold Threshold
 * \return The sum of squares of \alpha A + \beta B
 */
double TYPED_FUNC(
    bml_sum_squares2_ellsort) (
    bml_matrix_ellsort_t * A,
    bml_matrix_ellsort_t * B,
    double alpha,
    double beta,
    double threshold)
{
    int A_N = A->N;
    int A_M = A->M;
    int B_N = B->N;
    int B_M = B->M;
    int *A_index = (int *) A->index;
    int *A_nnz = (int *) A->nnz;
    int *B_index = (int *) B->index;
    int *B_nnz = (int *) B->nnz;
    int *A_localRowMin = A->domain->localRowMin;
    int *A_localRowMax = A->domain->localRowMax;

    REAL_T sum = 0.0;
    REAL_T *A_value = (REAL_T *) A->value;
    REAL_T *B_value = (REAL_T *) B->value;
    REAL_T alpha_ = (REAL_T) alpha;
    REAL_T beta_ = (REAL_T) beta;

    int myRank = bml_getMyRank();

    /* Per-thread scratch: y holds the dense combined row, ix marks which
     * columns of y are live, jjb lists the live column indices.  With the
     * IBM compilers the arrays are declared inside the parallel loop
     * instead of being firstprivate copies of these. */
#if !(defined(__IBMC__) || defined(__ibmxl__))
    REAL_T y[A_N];
    int ix[A_N], jjb[A_N];

    memset(y, 0.0, A_N * sizeof(REAL_T));
    memset(ix, 0, A_N * sizeof(int));
    memset(jjb, 0, A_N * sizeof(int));
#endif

#if defined(__IBMC__) || defined(__ibmxl__)
#pragma omp parallel for \
    shared(alpha_, beta_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    reduction(+:sum)
#else
#pragma omp parallel for \
    shared(alpha_, beta_) \
    shared(A_N, A_M, A_index, A_nnz, A_value) \
    shared(A_localRowMin, A_localRowMax, myRank) \
    shared(B_N, B_M, B_index, B_nnz, B_value) \
    firstprivate(ix, jjb, y) \
    reduction(+:sum)
#endif
    //for (int i = 0; i < A_N; i++)
    for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
    {
#if defined(__IBMC__) || defined(__ibmxl__)
        REAL_T y[A_N];
        int ix[A_N], jjb[A_N];

        memset(ix, 0, A_N * sizeof(int));
#endif
        int l = 0;
        /* Scatter alpha * row_i(A) into y, recording touched columns. */
        for (int jp = 0; jp < A_nnz[i]; jp++)
        {
            int k = A_index[ROWMAJOR(i, jp, A_N, A_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] += alpha_ * A_value[ROWMAJOR(i, jp, A_N, A_M)];
        }
        /* Accumulate beta * row_i(B) on top. */
        for (int jp = 0; jp < B_nnz[i]; jp++)
        {
            int k = B_index[ROWMAJOR(i, jp, B_N, B_M)];
            if (ix[k] == 0)
            {
                y[k] = 0.0;
                ix[k] = i + 1;
                jjb[l] = k;
                l++;
            }
            y[k] += beta_ * B_value[ROWMAJOR(i, jp, B_N, B_M)];
        }
        /* Sum the significant entries and reset the scratch arrays so the
         * next row starts clean. */
        for (int jp = 0; jp < l; jp++)
        {
            if (ABS(y[jjb[jp]]) > threshold)
                sum += y[jjb[jp]] * y[jjb[jp]];
            ix[jjb[jp]] = 0;
            y[jjb[jp]] = 0.0;
            jjb[jp] = 0;
        }
    }

    return (double) REAL_PART(sum);
}

/** Calculate the Frobenius norm of matrix A.
 *
 * \ingroup norm_group
 *
 * \param A The matrix A
 * \return The Frobenius norm of A
 */
double TYPED_FUNC(
    bml_fnorm_ellsort) (
    bml_matrix_ellsort_t * A)
{
    double fnorm = TYPED_FUNC(bml_sum_squares_ellsort) (A);

    /* Each rank holds only its local rows; combine before the sqrt. */
#ifdef DO_MPI
    if (bml_getNRanks() > 1 && A->distribution_mode == distributed)
    {
        bml_sumRealReduce(&fnorm);
    }
#endif

    fnorm = sqrt(fnorm);

    return (double) REAL_PART(fnorm);
}

/** Calculate the Frobenius norm of 2 matrices.
* * \ingroup norm_group * * \param A The matrix A * \param B The matrix B * \return The Frobenius norm of A-B */ double TYPED_FUNC( bml_fnorm2_ellsort) ( bml_matrix_ellsort_t * A, bml_matrix_ellsort_t * B) { int N = A->N; int M = A->M; double fnorm = 0.0; REAL_T rvalue; int *A_nnz = (int *) A->nnz; int *A_index = (int *) A->index; int *A_localRowMin = A->domain->localRowMin; int *A_localRowMax = A->domain->localRowMax; REAL_T *A_value = (REAL_T *) A->value; int *B_nnz = (int *) B->nnz; int *B_index = (int *) B->index; REAL_T *B_value = (REAL_T *) B->value; REAL_T temp; int myRank = bml_getMyRank(); #pragma omp parallel for \ private(rvalue, temp) \ shared(N, M, A_nnz, A_index, A_value) \ shared(A_localRowMin, A_localRowMax, myRank) \ shared(B_nnz, B_index, B_value) \ reduction(+:fnorm) //for (int i = 0; i < N; i++) for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++) { for (int j = 0; j < A_nnz[i]; j++) { for (int k = 0; k < B_nnz[i]; k++) { if (A_index[ROWMAJOR(i, j, N, M)] == B_index[ROWMAJOR(i, k, N, M)]) { rvalue = B_value[ROWMAJOR(i, k, N, M)]; break; } rvalue = 0.0; } temp = A_value[ROWMAJOR(i, j, N, M)] - rvalue; fnorm += temp * temp; } for (int j = 0; j < B_nnz[i]; j++) { for (int k = 0; k < A_nnz[i]; k++) { if (A_index[ROWMAJOR(i, k, N, M)] == B_index[ROWMAJOR(i, j, N, M)]) { rvalue = A_value[ROWMAJOR(i, k, N, M)]; break; } rvalue = 0.0; } if (rvalue == 0.0) { temp = B_value[ROWMAJOR(i, j, N, M)]; fnorm += temp * temp; } } } #ifdef DO_MPI if (bml_getNRanks() > 1 && A->distribution_mode == distributed) { bml_sumRealReduce(&fnorm); } #endif fnorm = sqrt(fnorm); return (double) REAL_PART(fnorm); }
GB_unop__log10_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__log10_fc32_fc32)
// op(A') function: GB (_unop_tran__log10_fc32_fc32)

// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_clog10f (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_clog10f (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = GB_clog10f (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LOG10 || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__log10_fc32_fc32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog10f (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip slots not present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC32_t aij = Ax [p] ;
            GxB_FC32_t z = aij ;
            Cx [p] = GB_clog10f (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__log10_fc32_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template consumes the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
O1Normal3D.c
#include <mpi.h>
#include "grid.h"

extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_temp;

extern struct {
    char *name;
    int loc;
    int dim;
    union {
        GVAL *restrict * restrict p2;
        GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_dvg;

/*
 * Accumulates 0.05 * gv_dvg into gv_temp for every cell of the grid
 * blocks assigned to this MPI rank.  Blocks are assigned to ranks in
 * contiguous chunks of ceil(cBlkCnt / world_size) blocks each.
 */
void O1Normal3D(GRID * g)
{
    /* Number of blocks per rank, rounded up (ceiling division). */
    const size_t blocks_per_rank =
        ((g->cBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size;
    /* Ranks owning the first and the last global block. */
    const size_t first_rank = (0) / blocks_per_rank;
    const size_t last_rank = (g->cBlkCnt - 1) / blocks_per_rank;

    /* Local index of the first block this rank processes. */
    size_t min_block = 0;
    if (g->mpi_rank == first_rank)
        min_block = 0 % blocks_per_rank;

    /* One past the local index of the last block this rank processes. */
    size_t max_block;
    if (g->mpi_rank < first_rank || g->mpi_rank > last_rank)
    {
        max_block = 0;      /* rank owns no blocks */
    }
    else if (g->mpi_rank == last_rank)
    {
        /* The last rank may hold only a partial chunk. */
        const size_t remainder = g->cBlkCnt % blocks_per_rank;
        max_block = remainder ? remainder : blocks_per_rank;
    }
    else
    {
        max_block = blocks_per_rank;
    }

#pragma omp parallel for
    for (size_t blk = min_block; blk < max_block; blk++)
    {
        for (size_t lvl = 0; lvl < g->height; lvl++)
        {
            for (size_t cell = 0; cell < g->blkSize; cell++)
            {
                gv_temp->data_pointer.p3[blk][lvl][cell] +=
                    0.05 * gv_dvg->data_pointer.p3[blk][lvl][cell];
            }
        }
    }
}
mxv_col_omp.c
/*
!-----------------------------------------------------------------------
! Author: Ruud van der Pas, Sun Microsystems
!
! Copyright: Sun Microsystems, All rights reserved, Un-authorized
! distribution not permitted
!-----------------------------------------------------------------------
*/
#include "labs.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Matrix-vector product a = B*c where B is m x n with element (i,j)
 * stored at b[i*n+j].  The j loop walks the columns one at a time while
 * the worksharing constructs split the rows across threads; the region
 * is only parallelized when m exceeds threshold_col (from labs.h).
 */
void mxv_col(int m, int n, double *a, double *b, double *c)
{
   int row, col;

   /* threshold_col = 375; */
   # pragma omp parallel if (m > threshold_col) default (none) \
        private (row, col) shared(a, b, c, n, m)
   {
      /* Seed the result with the first column's contribution. */
      #pragma omp for schedule (dynamic)
      for (row = 0; row < m; row++)
         a[row] = b[row*n] * c[0];

      /* Fold in the remaining columns. */
      for (col = 1; col < n; col++)
      {
         #pragma omp for schedule (dynamic)
         for (row = 0; row < m; row++)
            a[row] += b[row*n + col] * c[col];
      }
   } /* -- End of parallel region --*/
}
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p T o F i t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const double x_shear,const double y_shear,
%        const double width,const double height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Calculate the rotated image size.
  */
  /* Start from the four corners of a width x height box centered at the
     origin, shear them, then translate back to image coordinates. */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;  /* third shear of the XYX rotation */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /* Axis-aligned bounding box of the sheared corners. */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=(ssize_t) ceil(min.x-0.5);
  geometry.y=(ssize_t) ceil(min.y-0.5);
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  /* Crop with a neutral page geometry, then restore the original page. */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s k e w I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.
Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The result will be auto-croped if the artifact "deskew:auto-crop" is
%  defined, while the amount the image is to be deskewed, in degrees is also
%  saved as the artifact "deskew:angle".
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Accumulate the projection profile of the (bit-packed) source matrix.
   `sign` selects which half of the projection[] array is written. */
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrixs;
  q=destination_matrixs;
  /* Butterfly passes: combine column pairs at doubling strides, ping-
     ponging between the two matrix buffers. */
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /* Rows where both shifted partners (y+i and y+i+1) exist. */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* Rows where only the y+i partner exists. */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* Remaining rows: copy the element through unchanged. */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    /* Sum of squared differences between vertically adjacent elements;
       a sharper profile indicates better row alignment. */
    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}

/* Threshold the image into a bit-per-pixel matrix and fill projection[]
   (size 2*width-1) for both shear directions.  Returns MagickFalse only
   on allocation failure. */
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /* Smallest power of two >= number of bytes per packed row. */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /* bits[j] = number of 1-bits in byte value j (popcount table). */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0; c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* First pass: pack each row right-to-left (i counts down) so the
     projection is taken in the -1 direction. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /* A pixel any of whose channels falls below the threshold is
         treated as foreground (bit set). */
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* Second pass: pack left-to-right (i counts up) for the +1 direction. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  return(MagickTrue);
}

/* Set image->background_color to the average color of the border frame of
   thickness `offset` pixels. */
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /* Skip rows that lie entirely inside the border frame. */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Skip interior columns; only border pixels are averaged. */
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
      background.alpha/count);
}

MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* The skew is the offset with the strongest projection response. */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    /* Record the detected angle on the result for callers to inspect. */
    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  /* Median-filter before measuring the bounding box so isolated noise
     pixels do not inflate the crop geometry. */
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e g r a l R o t a t e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IntegralRotateImage() rotates the image an integral of 90 degrees.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the rotated image.
%
%  The format of the IntegralRotateImage method is:
%
%      Image *IntegralRotateImage(const Image *image,size_t rotations,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  if (rotations == 0)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* 90/270 degrees swap the image dimensions. */
  if ((rotations == 1) || (rotations == 3))
    rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
      exception);
  else
    rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
      exception);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.
      */
      /* Process cache-sized tiles; tile_width is widened to the full row
         so each iteration reads whole rows of a tile band. */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* Clip the tile at the image's right/bottom edge. */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          /* Each source column y becomes a destination row. */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* Walk the tile column bottom-up (pointer decremented by one
               row per destination pixel). */
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2))
                {
                  tile_pixels-=width*GetPixelChannels(image);
                  q+=GetPixelChannels(rotate_image);
                  continue;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /* Rotate the page geometry along with the pixels. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        /* Write the destination row right-to-left. */
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
            {
              p+=GetPixelChannels(image);
              continue;
            }
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* Clip the tile at the image's right/bottom edge. */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* Walk the tile column top-down, starting from the mirrored
               column (width-1)-y. */
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              if (GetPixelWriteMask(image,tile_pixels) <= (QuantumRange/2))
                {
                  tile_pixels+=width*GetPixelChannels(image);
                  q+=GetPixelChannels(rotate_image);
                  continue;
                }
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /* Rotate the page geometry along with the pixels. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   X S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  XShearImage() shears the image in the X direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a vertical
%  Y-axis.  X shears will widen an image creating 'empty' triangles on the left
%  and right sides of the source image.
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image: each row of the region is shifted horizontally by an
    amount proportional to its distance from the region's vertical center.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /*
      Signed displacement for this row; rows above/below the center shift in
      opposite directions.
    */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      step is the whole-pixel shift (plus one); area is the fractional
      remainder used to blend neighboring source pixels.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right; 'pixel' carries the previous source
          pixel so adjacent pixels can be area-blended.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Fill the trailing edge with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left so source pixels are read before the
          destination overwrites them.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /*
          Fill the leading edge with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must be serialized across the parallel rows. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_XShearImage)
#endif
        proceed=SetImageProgress(image,XShearImageTag,progress++,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
%
%  The format of the YShearImage method is:
%
%      MagickBooleanType YShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the Y
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image: the column-wise analog of XShearImage; each column is
    shifted vertically by an amount proportional to its distance from the
    region's horizontal center.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    ssize_t
      step;

    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    if (status == MagickFalse)
      continue;
    /* Fetch one full column (width 1, image->rows tall). */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      step is the whole-pixel shift (plus one); area is the fractional
      remainder used to blend neighboring source pixels.
    */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom; 'pixel' carries the previous source
          pixel so adjacent pixels can be area-blended.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Fill the trailing edge with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top so source pixels are read before the
          destination overwrites them.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /*
          Fill the leading edge with the background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must be serialized across the parallel columns. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_YShearImage)
#endif
        proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearImage() creates a new image that is a shear_image copy of an existing
%  one.  Shearing slides one edge of an image along the X or Y axis, creating
%  a parallelogram.  An X direction shear slides an edge along the X axis,
%  while a Y direction shear slides an edge along the Y axis.  The amount of
%  the shear is controlled by a shear angle.  For X direction shears, x_shear
%  is measured relative to the Y axis, and similarly, for Y direction shears
%  y_shear is measured relative to the X axis.  Empty triangles left over from
%  shearing the image are filled with the background color defined by member
%  'background_color' of the image.  ShearImage() allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  ShearImage() is based on the paper "A Fast Algorithm for General Raster
%  Rotation" by Alan W. Paeth.
%
%  The format of the ShearImage method is:
%
%      Image *ShearImage(const Image *image,const double x_shear,
%        const double y_shear,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Shear angles at exact multiples of 90 degrees have an infinite tangent
    and cannot be represented by this algorithm.
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size: the sheared canvas must be wide enough for the X
    shear and the offsets center the source within it.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: an X shear followed by a Y shear, then crop the result
    back to the minimal bounding parallelogram.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r R o t a t e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearRotateImage() creates a new image that is a rotated copy of an
%  existing one.  Positive angles rotate counter-clockwise (right-hand rule),
%  while negative angles rotate clockwise.  Rotated images are usually larger
%  than the originals and have 'empty' triangular corners.  X axis.
%  Empty
%  triangles left over from shearing the image are filled with the background
%  color defined by member 'background_color' of the image.  ShearRotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  ShearRotateImage() is based on the paper "A Fast Algorithm for General
%  Raster Rotation" by Alan W. Paeth.  ShearRotateImage is adapted from a
%  similar method based on the Paeth paper written by Michael Halle of the
%  Spatial Imaging Group, MIT Media Lab.
%
%  The format of the ShearRotateImage method is:
%
%      Image *ShearRotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,
    shear_width,
    width;

  /*
    Adjust rotation angle: reduce to the range (-45, 45] plus a number of
    quarter-turns handled by IntegralRotateImage below.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations (Paeth 3-shear rotation: X, then Y, then X).
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
    bounds.width-shear_width+2)/2.0+0.5);
  bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
  /*
    Surround image with a border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,image->compose,
    exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: X shear, Y shear, X shear, then crop to fit.
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->alpha_trait=image->alpha_trait;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
GB_binop__pair_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated specialization of the PAIR operator for uint32,
// where cij = 1 regardless of the inputs; the GB_GETA/GB_GETB macros are
// therefore intentionally empty.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pair_uint32)
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A.*B function (eWiseMult):       GB ((none))
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pair_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__pair_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pair_uint32)
// C=scalar+B                       GB ((none))
// C=scalar+B'                      GB ((none))
// C=A+scalar                       GB ((none))
// C=A'+scalar                      GB ((none))

// C type:   uint32_t
// A type:   uint32_t
// B,b type: uint32_t
// BinaryOp: cij = 1

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA] (no-op: PAIR ignores its inputs)
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB] (no-op: PAIR ignores its inputs)
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: PAIR always produces 1
#define GB_BINOP(z,x,y,i,j) \
    z = 1 ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_PAIR || GxB_NO_UINT32 || GxB_NO_PAIR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pair_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pair_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (generated code): the block above always returns
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pair_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        ;
        ;
        Cx [p] = 1 ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    ;                               \
    ;                               \
    Cx [pC] = 1 ;                   \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
rmse.c
/*************************************************************************/ /** File: rmse.c **/ /** Description: calculate root mean squared error of particular **/ /** clustering. **/ /** Author: Sang-Ha Lee **/ /** University of Virginia. **/ /** **/ /** Note: euclid_dist_2() and find_nearest_point() adopted from **/ /** Minebench code. **/ /** **/ /*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #ifdef _OPENMP #include <omp.h> #endif #include "kmeans.h" extern double wtime(void); /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]); return(ans); } /*----< find_nearest_point() >-----------------------------------------------*/ __inline int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float max_dist=FLT_MAX; /* find the cluster center id with min distance to pt */ for (i=0; i<npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < max_dist) { max_dist = dist; index = i; } } return(index); } /*----< rms_err(): calculates RMSE of clustering >-------------------------------------*/ float rms_err (float **feature, /* [npoints][nfeatures] */ int nfeatures, int npoints, float **cluster_centres, /* [nclusters][nfeatures] */ int nclusters) { int i; int nearest_cluster_index; /* cluster center id with min distance to pt */ float sum_euclid = 0.0; /* sum of Euclidean distance squares */ float ret; /* return value */ /* calculate and sum the sqaure of euclidean distance*/ #pragma omp parallel for \ shared(feature,cluster_centres) \ firstprivate(npoints,nfeatures,nclusters) \ private(i, nearest_cluster_index) 
\ schedule (static) for (i=0; i<npoints; i++) { nearest_cluster_index = find_nearest_point(feature[i], nfeatures, cluster_centres, nclusters); sum_euclid += euclid_dist_2(feature[i], cluster_centres[nearest_cluster_index], nfeatures); } /* divide by n, then take sqrt */ ret = sqrt(sum_euclid / npoints); return(ret); }
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/functors/AddAssign.h> #include <blaze/math/functors/Assign.h> #include <blaze/math/functors/DivAssign.h> #include <blaze/math/functors/MultAssign.h> #include <blaze/math/functors/SubAssign.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/mpl/And.h> #include <blaze/util/mpl/Not.h> #include <blaze/util/mpl/Or.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. 
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side dense vector
        , bool TF2       // Transpose flag of the right-hand side dense vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   // Must run inside "#pragma omp parallel": each executing thread picks up
   // chunks via the "omp for" worksharing loop below.
   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   using ET1 = ElementType_<VT1>;
   using ET2 = ElementType_<VT2>;

   // SIMD subvector kernels are only used when both vector types enable SIMD
   // and their element types can be combined into a common SIMD type.
   constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable<ET1,ET2>::value );
   constexpr size_t SIMDSIZE( SIMDTrait< ElementType_<VT1> >::size );

   const bool lhsAligned( (~lhs).isAligned() );
   const bool rhsAligned( (~rhs).isAligned() );

   // The vector is split into one contiguous chunk per thread: 'addon' rounds
   // the per-thread share up so threads*sizePerThread covers the whole vector,
   // and for SIMD the share is additionally rounded up to a SIMDSIZE multiple
   // so every chunk boundary stays SIMD-aligned.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunks starting past the end (possible due to the round-up above) are skipped.
      if( index >= (~lhs).size() ) continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch on operand alignment so aligned SIMD loads/stores are used
      // whenever possible; 'op' performs the actual (compound) assignment.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         auto target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && lhsAligned ) {
         auto target( subvector<aligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else if( simdEnabled && rhsAligned ) {
         auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
      else {
         auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
         const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
         op( target, source );
      }
   }
}
/*! \endcond */
//*************************************************************************************************


//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense
//        vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \param op The (compound) assignment operation.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1   // Type of the left-hand side dense vector
        , bool TF1       // Transpose flag of the left-hand side dense vector
        , typename VT2   // Type of the right-hand side sparse vector
        , bool TF2       // Transpose flag of the right-hand side sparse vector
        , typename OP >  // Type of the assignment operation
void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   // Sparse right-hand sides cannot use SIMD: one unaligned chunk per thread.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

#pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Chunks starting past the end (possible due to the round-up above) are skipped.
      if( index >= (~lhs).size() ) continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) );
      const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) );
      op( target, source );
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  PLAIN ASSIGNMENT
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. 
Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, Assign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, AddAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case both operands are SMP-assignable and the element // types of both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. 
Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SubAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case both operands are SMP-assignable and the element types of // both operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. // \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > > smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, MultAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. 
// \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline EnableIf_< And< IsDenseVector<VT1> , Or< Not< IsSMPAssignable<VT1> > , Not< IsSMPAssignable<VT2> > > > > smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. // \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. 
Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // Nested SMP assignment of the element types is not supported.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fall back to the serial kernel if a serial section is currently active or the
      // right-hand side expression cannot be assigned in parallel; otherwise spawn an
      // OpenMP thread team and distribute the division assignment across it.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         divAssign( ~lhs, ~rhs );
      }
      else {
#pragma omp parallel shared( lhs, rhs )
         openmpAssign( ~lhs, ~rhs, DivAssign() );
      }
   }
}
/*! \endcond */
//*************************************************************************************************




//=================================================================================================
//
//  COMPILE TIME CONSTRAINTS
//
//=================================================================================================

//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {

// Guard: this header must only be compiled when OpenMP parallelization is enabled.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*!
\endcond */ //************************************************************************************************* } // namespace blaze #endif
GB_unop__identity_uint64_bool.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__identity_uint64_bool
// op(A') function:  GB_unop_tran__identity_uint64_bool

// C type:   uint64_t
// A type:   bool
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

// type of the A matrix entries
#define GB_ATYPE \
    bool

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

// access the entry of C at position p
#define GB_CX(p) Cx [p]

// unary operator (identity: z gets x unchanged)
#define GB_OP(z, x) \
    z = x ;

// casting (bool -> uint64_t)
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (false here: the bool -> uint64_t cast is required, so no plain memcpy)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator with a bool -> uint64_t typecast to all anz
// entries of Ax, writing the results into Cx, using up to nthreads threads.
// Returns GrB_NO_VALUE when the operator is disabled, GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__identity_uint64_bool
(
    uint64_t *Cx,       // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is not bitmap: all anz entries are present and are cast one by one
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b.
        // Only positions flagged present in Ab are converted.
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared by all generated unary ops and is pulled
// in from GB_unop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_unop_tran__identity_uint64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repacks fp32 convolution weights into bf16 storage, interleaving groups of
// 4 output channels so the compute kernel can load one 4-lane vector per tap.
// src layout = kw-kh-inch-outch, dst layout = 4b-kw-kh-inch-outch/4b.
static void convolution_transform_kernel_pack1to4_bf16s_neon(const Mat& weight_data, Mat& weight_data_bf16, int num_input, int num_output, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = kw-kh-inch-outch
    // dst = 4b-kw-kh-inch-outch/4b

    Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

    // elemsize = 2 bytes (bf16) * 4 (output-channel pack), elempack = 4
    weight_data_bf16.create(maxk, num_input, num_output / 4, (size_t)2 * 4, 4);

    for (int q = 0; q + 3 < num_output; q += 4)
    {
        // four consecutive output channels are interleaved into one dst channel
        const Mat k0 = weight_data_r2.channel(q);
        const Mat k1 = weight_data_r2.channel(q + 1);
        const Mat k2 = weight_data_r2.channel(q + 2);
        const Mat k3 = weight_data_r2.channel(q + 3);

        Mat g0 = weight_data_bf16.channel(q / 4);

        for (int p = 0; p < num_input; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            unsigned short* g00 = g0.row<unsigned short>(p);

            for (int k = 0; k < maxk; k++)
            {
                // store the same tap of the 4 output channels contiguously
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);

                g00 += 4;
            }
        }
    }
}

// Generic convolution with pack1 bf16 input and pack4 bf16 output.
// Accumulation is done in fp32 (float32x4_t); inputs/weights/outputs are
// stored as bf16.  Kernel geometry is arbitrary (kernel/dilation/stride
// are parameters); activation is applied per output pixel.
static void convolution_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat offsets (in elements) of each tap inside the input
    // plane, with dilation applied, relative to the window's top-left corner
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        unsigned short* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp32 accumulator, seeded with the 4-lane bias of this channel group
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const unsigned short* kptr = weight_data_bf16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const unsigned short* sptr = m.row<const unsigned short>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast one bf16 input sample, multiply by the
                        // 4-output-channel weight vector, accumulate in fp32
                        float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(sptr[space_ofs[k]]));
                        float32x4_t _w = vcvt_f32_bf16(vld1_u16(kptr));
                        _sum = vmlaq_f32(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                // convert the fp32 accumulator back to bf16 on store
                vst1_u16(outptr + j * 4, vcvt_bf16_f32(_sum));
            }

            outptr += outw * 4;
        }
    }
}
simple_for.c
#include <stdio.h>
#include "omp.h"

/* Based on the tutorial:
http://openmp.org/mp-documents/omp-hands-on-SC08.pdf */

/*
 * Fills a[] with the id of the OpenMP thread that computed each element,
 * then prints the array.
 *
 * Bug fixed: `id` was declared outside the parallel loop, making it SHARED
 * across the thread team.  Every thread wrote to the same variable, so
 * a[i] could record another thread's id (a data race).  Declaring `id`
 * inside the loop body makes it private to each iteration/thread.
 * Also: `void main` is non-standard C; main must return int.
 */
int main(void)
{
    int a[20];

    omp_set_num_threads(4);

    #pragma omp parallel for
    for (int i = 0; i < 20; i++)
    {
        /* private per iteration: no race between threads */
        int id = omp_get_thread_num();
        a[i] = id;
    }

    for (int i = 0; i < 20; i++)
    {
        printf("a[%d]=%d\n", i, a[i]);
    }

    return 0;
}
convolution_3x3_pack1ton_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 3x3 stride-1 convolution, pack1 fp16 input -> packn fp16 output, RISC-V
// Vector (RVV) fp16 intrinsics.  packn lanes = one group of output channels.
// Each kernel tap _kRC is a packn-wide vector; input samples are scalars
// broadcast via vfmacc_vf.  Output columns are processed 8/4/2/1 at a time.
static void conv3x3s1_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    // packn = number of fp16 lanes in one vector register group
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        // seed the whole output channel with the bias (or zero), then
        // accumulate the contribution of each input channel on top of it
        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            // three consecutive input rows feeding the 3x3 window
            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            // the 9 kernel taps for this (out-group, in-channel) pair
            vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
            vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
            vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
            vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
            vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
            vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
            vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
            vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
            vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 8 output columns per iteration
                for (; j + 7 < outw; j += 8)
                {
                    // read back partial sums (bias or previous channels' work)
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    vfloat16m1_t _sum4 = vle16_v_f16m1(outptr0 + packn * 4, vl);
                    vfloat16m1_t _sum5 = vle16_v_f16m1(outptr0 + packn * 5, vl);
                    vfloat16m1_t _sum6 = vle16_v_f16m1(outptr0 + packn * 6, vl);
                    vfloat16m1_t _sum7 = vle16_v_f16m1(outptr0 + packn * 7, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[2], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[3], _k00, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[4], _k00, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[5], _k00, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[6], _k00, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[7], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[3], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[4], _k01, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[5], _k01, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[6], _k01, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[7], _k01, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[8], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[5], _k02, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[6], _k02, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[7], _k02, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[8], _k02, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[9], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[2], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[3], _k10, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[4], _k10, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[5], _k10, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[6], _k10, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[7], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[3], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[4], _k11, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[5], _k11, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[6], _k11, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[7], _k11, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[8], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[5], _k12, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[6], _k12, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[7], _k12, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[8], _k12, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[9], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[2], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[3], _k20, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[4], _k20, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[5], _k20, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[6], _k20, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[7], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[3], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[4], _k21, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[5], _k21, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[6], _k21, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[7], _k21, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[8], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[5], _k22, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[6], _k22, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[7], _k22, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[8], _k22, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[9], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
                    vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
                    vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
                    vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);

                    outptr0 += packn * 8;

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                }
                // 4 output columns per iteration
                for (; j + 3 < outw; j += 4)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[2], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[3], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[3], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[4], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[5], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[2], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[3], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[3], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[4], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[5], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[2], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[3], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[3], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[4], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[5], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);

                    outptr0 += packn * 4;

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                }
                // 2 output columns per iteration
                for (; j + 1 < outw; j += 2)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[1], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[1], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[1], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);

                    outptr0 += packn * 2;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }
                // remaining single columns
                for (; j < outw; j++)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);

                    outptr0 += packn;

                    r0 += 1;
                    r1 += 1;
                    r2 += 1;
                }

                // skip the 2-pixel border so the rows start the next window
                r0 += 2;
                r1 += 2;
                r2 += 2;
            }

            // advance to the next input channel's 3x3 kernel taps
            k0 += 9 * packn;
        }
    }
}

// 3x3 stride-2 convolution, pack1 fp16 input -> packn fp16 output, RVV fp16
// intrinsics.  Same accumulation scheme as the stride-1 kernel above, but
// input samples advance by 2 per output column and rows skip `tailstep`
// elements at the end of each output row.
static void conv3x3s2_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // elements left over at the end of an input row after outw stride-2 steps
    const int tailstep = w - 2 * outw + w;

    const __fp16* bias = _bias;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        vfloat16m1_t _bias0 = bias ? vle16_v_f16m1(bias + p * packn, vl) : vfmv_v_f_f16m1(0.f, vl);
        out0.fill(_bias0);

        const __fp16* k0 = kernel.channel(p);

        int q = 0;
        for (; q < inch; q++)
        {
            __fp16* outptr0 = out0;

            const Mat img0 = bottom_blob.channel(q);

            const __fp16* r0 = img0.row<const __fp16>(0);
            const __fp16* r1 = img0.row<const __fp16>(1);
            const __fp16* r2 = img0.row<const __fp16>(2);

            vfloat16m1_t _k00 = vle16_v_f16m1(k0, vl);
            vfloat16m1_t _k01 = vle16_v_f16m1(k0 + packn, vl);
            vfloat16m1_t _k02 = vle16_v_f16m1(k0 + packn * 2, vl);
            vfloat16m1_t _k10 = vle16_v_f16m1(k0 + packn * 3, vl);
            vfloat16m1_t _k11 = vle16_v_f16m1(k0 + packn * 4, vl);
            vfloat16m1_t _k12 = vle16_v_f16m1(k0 + packn * 5, vl);
            vfloat16m1_t _k20 = vle16_v_f16m1(k0 + packn * 6, vl);
            vfloat16m1_t _k21 = vle16_v_f16m1(k0 + packn * 7, vl);
            vfloat16m1_t _k22 = vle16_v_f16m1(k0 + packn * 8, vl);

            int i = 0;
            for (; i < outh; i++)
            {
                int j = 0;
                // 8 output columns per iteration (16 input samples, stride 2)
                for (; j + 7 < outw; j += 8)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);
                    vfloat16m1_t _sum4 = vle16_v_f16m1(outptr0 + packn * 4, vl);
                    vfloat16m1_t _sum5 = vle16_v_f16m1(outptr0 + packn * 5, vl);
                    vfloat16m1_t _sum6 = vle16_v_f16m1(outptr0 + packn * 6, vl);
                    vfloat16m1_t _sum7 = vle16_v_f16m1(outptr0 + packn * 7, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[8], _k00, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[10], _k00, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[12], _k00, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[14], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[9], _k01, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[11], _k01, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[13], _k01, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[15], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r0[10], _k02, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r0[12], _k02, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r0[14], _k02, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r0[16], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[8], _k10, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[10], _k10, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[12], _k10, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[14], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[9], _k11, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[11], _k11, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[13], _k11, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[15], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r1[10], _k12, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r1[12], _k12, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r1[14], _k12, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r1[16], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[8], _k20, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[10], _k20, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[12], _k20, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[14], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[9], _k21, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[11], _k21, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[13], _k21, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[15], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);
                    _sum4 = vfmacc_vf_f16m1(_sum4, r2[10], _k22, vl);
                    _sum5 = vfmacc_vf_f16m1(_sum5, r2[12], _k22, vl);
                    _sum6 = vfmacc_vf_f16m1(_sum6, r2[14], _k22, vl);
                    _sum7 = vfmacc_vf_f16m1(_sum7, r2[16], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);
                    vse16_v_f16m1(outptr0 + packn * 4, _sum4, vl);
                    vse16_v_f16m1(outptr0 + packn * 5, _sum5, vl);
                    vse16_v_f16m1(outptr0 + packn * 6, _sum6, vl);
                    vse16_v_f16m1(outptr0 + packn * 7, _sum7, vl);

                    outptr0 += packn * 8;

                    r0 += 16;
                    r1 += 16;
                    r2 += 16;
                }
                // 4 output columns per iteration
                for (; j + 3 < outw; j += 4)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);
                    vfloat16m1_t _sum2 = vle16_v_f16m1(outptr0 + packn * 2, vl);
                    vfloat16m1_t _sum3 = vle16_v_f16m1(outptr0 + packn * 3, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[4], _k00, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[6], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[5], _k01, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[7], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r0[6], _k02, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r0[8], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[4], _k10, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[6], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[5], _k11, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[7], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r1[6], _k12, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r1[8], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[4], _k20, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[6], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[5], _k21, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[7], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);
                    _sum2 = vfmacc_vf_f16m1(_sum2, r2[6], _k22, vl);
                    _sum3 = vfmacc_vf_f16m1(_sum3, r2[8], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);
                    vse16_v_f16m1(outptr0 + packn * 2, _sum2, vl);
                    vse16_v_f16m1(outptr0 + packn * 3, _sum3, vl);

                    outptr0 += packn * 4;

                    r0 += 8;
                    r1 += 8;
                    r2 += 8;
                }
                // 2 output columns per iteration
                for (; j + 1 < outw; j += 2)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);
                    vfloat16m1_t _sum1 = vle16_v_f16m1(outptr0 + packn, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[2], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[3], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r0[4], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[2], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[3], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r1[4], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[2], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[3], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);
                    _sum1 = vfmacc_vf_f16m1(_sum1, r2[4], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);
                    vse16_v_f16m1(outptr0 + packn, _sum1, vl);

                    outptr0 += packn * 2;

                    r0 += 4;
                    r1 += 4;
                    r2 += 4;
                }
                // remaining single columns
                for (; j < outw; j++)
                {
                    vfloat16m1_t _sum0 = vle16_v_f16m1(outptr0, vl);

                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[0], _k00, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[1], _k01, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r0[2], _k02, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[0], _k10, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[1], _k11, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r1[2], _k12, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[0], _k20, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[1], _k21, vl);
                    _sum0 = vfmacc_vf_f16m1(_sum0, r2[2], _k22, vl);

                    vse16_v_f16m1(outptr0, _sum0, vl);

                    outptr0 += packn;

                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                }

                // stride-2 rows: skip the unused tail and the next input row
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }

            // advance to the next input channel's 3x3 kernel taps
            k0 += 9 * packn;
        }
    }
}
openmp_test.c
#include <stdio.h>
#include <omp.h>

/* Minimal OpenMP smoke test: spawn a team of 4 threads, each printing its id.
 * The parallel region body runs once per thread; omp_get_thread_num() returns
 * the calling thread's id (0..3).  Output order is nondeterministic. */
int main(){
    #pragma omp parallel num_threads(4)
    {
        printf("Hello world from thread %d!\n", omp_get_thread_num());
    }
    return 0;
}
three_step_v_p_strategy.h
// // Project Name: KratosPFEMFluidDynamicsApplication $ // Last modified by: $Author: AFranci $ // Date: $Date: June 2021 $ // Revision: $Revision: 0.0 $ // // #ifndef KRATOS_THREE_STEP_V_P_STRATEGY_H #define KRATOS_THREE_STEP_V_P_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "includes/cfd_variables.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" #include "custom_utilities/mesher_utilities.hpp" #include "custom_utilities/boundary_normals_calculation_utilities.hpp" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" #include "custom_utilities/solver_settings.h" #include "custom_strategies/strategies/gauss_seidel_linear_strategy.h" #include "pfem_fluid_dynamics_application_variables.h" #include "v_p_strategy.h" #include <stdio.h> #include <math.h> namespace Kratos { ///@addtogroup PFEMFluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template <class TSparseSpace, class TDenseSpace, class TLinearSolver> class ThreeStepVPStrategy : public VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(ThreeStepVPStrategy); typedef VPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename 
BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ ThreeStepVPStrategy(ModelPart &rModelPart, typename TLinearSolver::Pointer pVelocityLinearSolver, typename TLinearSolver::Pointer pPressureLinearSolver, bool ReformDofSet = true, double VelTol = 0.0001, double PresTol = 0.0001, int MaxPressureIterations = 1, // Only for predictor-corrector unsigned int TimeOrder = 2, unsigned int DomainSize = 2) : BaseType(rModelPart, pVelocityLinearSolver, pPressureLinearSolver, ReformDofSet, DomainSize), mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mReformDofSet(ReformDofSet) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. this->Check(); bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. 
// Additional Typedefs typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme<TSparseSpace, TDenseSpace> SchemeType; typename SchemeType::Pointer pScheme; typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>()); pScheme.swap(Temp); //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver)); /* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */ this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel()); vel_build->SetCalculateReactionsFlag(false); /* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */ BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver)); this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel()); pressure_build->SetCalculateReactionsFlag(false); 
KRATOS_CATCH("");
}

/// Destructor.
virtual ~ThreeStepVPStrategy() {}

/// @brief Validate the setup before solving.
/// Runs the base-class checks, verifies that the DELTA_TIME variable was
/// registered, and then asks every element in the ModelPart to check
/// itself against the current ProcessInfo.
/// @return 0 on success; the first non-zero element/base error code otherwise.
int Check() override
{
    KRATOS_TRY;

    // Check elements and conditions in the model part
    int ierr = BaseType::Check();
    if (ierr != 0)
        return ierr;

    // A zero key means the application variables were never registered.
    if (DELTA_TIME.Key() == 0)
        KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");

    ModelPart &rModelPart = BaseType::GetModelPart();
    const auto &r_current_process_info = rModelPart.GetProcessInfo();
    for (const auto &r_element : rModelPart.Elements())
    {
        ierr = r_element.Check(r_current_process_info);
        if (ierr != 0)
        {
            // Stop at the first failing element; its code is returned below.
            break;
        }
    }

    return ierr;

    KRATOS_CATCH("");
}

/// @brief Store the BDF time-integration coefficients in the ProcessInfo.
/// For mTimeOrder == 2 a variable-step BDF2 formula is used (Rho is the
/// ratio of the previous to the current time step); for mTimeOrder == 1 a
/// plain backward-Euler (BDF1) pair is written instead.
/// @param rCurrentProcessInfo ProcessInfo holding DELTA_TIME; receives
///        BDF_COEFFICIENTS.
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
    KRATOS_TRY;

    if (mTimeOrder == 2)
    {
        //calculate the BDF coefficients
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];

        double Rho = OldDt / Dt;
        double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(3, false);

        BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho);        //coefficient for step n+1 (3/2Dt if Dt is constant)
        BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
        BDFcoeffs[2] = TimeCoeff;                                  //coefficient for step n-1 (1/2Dt if Dt is constant)
    }
    else if (mTimeOrder == 1)
    {
        double Dt = rCurrentProcessInfo[DELTA_TIME];
        double TimeCoeff = 1.0 / Dt;

        Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
        BDFcoeffs.resize(2, false);

        BDFcoeffs[0] = TimeCoeff;  //coefficient for step n+1 (1/Dt)
        BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
    }

    KRATOS_CATCH("");
}

/// @brief Solve one time step with the three-step velocity-pressure scheme.
/// Iterates up to mMaxPressureIter times over: (1) momentum solve,
/// (2) pressure solve, (3) end-of-step velocity update (see the body,
/// continued below).
/// @return true if both momentum and continuity converged.
bool SolveSolutionStep() override
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    bool converged = false;
    double NormV = 0;

    unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("\nSolution with three_step_vp_strategy at t=") << currentTime << "s" << std::endl; bool momentumConverged = true; bool continuityConverged = false; this->SetBlockedAndIsolatedFlags(); // this->FreePressure(); for (unsigned int it = 0; it < maxNonLinearIterations; ++it) { KRATOS_INFO("\n ------------------- ITERATION ") << it << " ------------------- " << std::endl; // 1. Compute first-step velocity rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1); if (it == 0) { mpMomentumStrategy->InitializeSolutionStep(); // this->FixPressure(); } else { this->RecoverFractionalVelocity(); } momentumConverged = this->SolveFirstVelocitySystem(NormV); // 2. Pressure solution rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5); if (it == 0) { mpPressureStrategy->InitializeSolutionStep(); } continuityConverged = this->SolveContinuityIteration(); // 3. Compute end-of-step velocity this->CalculateEndOfStepVelocity(NormV); this->UpdateTopology(rModelPart, BaseType::GetEchoLevel()); if (continuityConverged && momentumConverged) { rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false); rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false); converged = true; //double tensilStressSign = -1.0; // ComputeErrorL2Norm(tensilStressSign); this->UpdateStressStrain(); KRATOS_INFO("ThreeStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl; break; } else if (it == (maxNonLinearIterations - 1) && it != 0) { //double tensilStressSign = -1.0; // ComputeErrorL2Norm(tensilStressSign); this->UpdateStressStrain(); } } if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Convergence tolerance not reached." 
<< std::endl; if (mReformDofSet) this->Clear(); return converged; } void FinalizeSolutionStep() override { } void InitializeSolutionStep() override { } void UpdateStressStrain() override { ModelPart &rModelPart = BaseType::GetModelPart(); const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd); for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { itElem->InitializeSolutionStep(rCurrentProcessInfo); } } this->CalculateTemporalVariables(); } void CalculateTemporalVariables() override { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); for (ModelPart::NodeIterator i = rModelPart.NodesBegin(); i != rModelPart.NodesEnd(); ++i) { array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0); array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1); array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0); array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1); /* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */ if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID))) { UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity); } else if ((i)->Is(RIGID)) { array_1d<double, 3> Zeros(3, 0.0); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros; (i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros; } else { (i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0; 
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0; if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION)) { array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION); (i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration; (i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME]; } } const double timeInterval = rCurrentProcessInfo[DELTA_TIME]; unsigned int timeStep = rCurrentProcessInfo[STEP]; if (timeStep == 1) { (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0; (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0; } else { double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0); double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1); double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0); double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0); CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval; CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval; CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval; } } } inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration, const array_1d<double, 3> &CurrentVelocity, array_1d<double, 3> &PreviousAcceleration, const array_1d<double, 3> &PreviousVelocity) { ModelPart &rModelPart = BaseType::GetModelPart(); ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo(); double Dt = rCurrentProcessInfo[DELTA_TIME]; // noalias(CurrentAcceleration) = 2.0 * (CurrentVelocity - PreviousVelocity) / Dt - PreviousAcceleration; // 2nd order noalias(CurrentAcceleration) = (CurrentVelocity - PreviousVelocity) / Dt; // 1st order } void Clear() override { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ 
void SetEchoLevel(int Level) override { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { std::stringstream buffer; buffer << "ThreeStepVPStrategy"; return buffer.str(); } /// Print information about this object. void PrintInfo(std::ostream &rOStream) const override { rOStream << "ThreeStepVPStrategy"; } /// Print object's data. void PrintData(std::ostream &rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME variables. */ bool SolveFirstVelocitySystem(double &NormV) { std::cout << "1. SolveFirstVelocitySystem " << std::endl; bool momentumConvergence = false; double NormDv = 0; // build momentum system and solve for fractional step velocity increment NormDv = mpMomentumStrategy->Solve(); // Check convergence momentumConvergence = this->CheckVelocityIncrementConvergence(NormDv, NormV); if (!momentumConvergence && BaseType::GetEchoLevel() > 0) std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl; return momentumConvergence; } bool SolveContinuityIteration() { std::cout << "2. 
SolveContinuityIteration " << std::endl; bool continuityConvergence = false; double NormDp = 0; NormDp = mpPressureStrategy->Solve(); continuityConvergence = CheckPressureIncrementConvergence(NormDp); // continuityConvergence = true; if (!continuityConvergence && BaseType::GetEchoLevel() > 0) std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl; return continuityConvergence; } void CalculateEndOfStepVelocity(const double NormV) { std::cout << "3. CalculateEndOfStepVelocity()" << std::endl; ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); const int n_elems = rModelPart.NumberOfElements(); array_1d<double, 3> Out = ZeroVector(3); VariableUtils().SetHistoricalVariableToZero(FRACT_VEL, rModelPart.Nodes()); VariableUtils().SetHistoricalVariableToZero(NODAL_VOLUME, rModelPart.Nodes()); #pragma omp parallel for for (int i_elem = 0; i_elem < n_elems; ++i_elem) { const auto it_elem = rModelPart.ElementsBegin() + i_elem; it_elem->Calculate(VELOCITY, Out, rModelPart.GetProcessInfo()); Element::GeometryType &geometry = it_elem->GetGeometry(); double elementalVolume = 0; if (mDomainSize == 2) { elementalVolume = geometry.Area() / 3.0; } else if (mDomainSize == 3) { elementalVolume = geometry.Volume() * 0.25; } // index = 0; unsigned int numNodes = geometry.size(); for (unsigned int i = 0; i < numNodes; i++) { double &nodalVolume = geometry(i)->FastGetSolutionStepValue(NODAL_VOLUME); nodalVolume += elementalVolume; } } rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); if (mDomainSize > 2) { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalVolume = it_node->FastGetSolutionStepValue(NODAL_VOLUME); double fractionalVelocity = 0; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_X); // VELOCITY_X stores the 
velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_X) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Y)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Y); // VELOCITY_Y stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Y) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Z)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Z); // VELOCITY_Z stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Z) += it_node->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalVolume; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Z) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } } } } else { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; const double NodalArea = it_node->FastGetSolutionStepValue(NODAL_VOLUME); double fractionalVelocity = 0; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_X); // VELOCITY_X stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_X) += it_node->FastGetSolutionStepValue(FRACT_VEL_X) 
/ NodalArea; // here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_X) = fractionalVelocity; // now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } if (!it_node->IsFixed(VELOCITY_Y)) { fractionalVelocity = it_node->FastGetSolutionStepValue(VELOCITY_Y); // VELOCITY_Y stores the velocity after the first step computation it_node->FastGetSolutionStepValue(VELOCITY_Y) += it_node->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea; //here FRACT_VEL stores the gradient of pressure computed inside the element it_node->FastGetSolutionStepValue(FRACT_VEL_Y) = fractionalVelocity; //now FRACT_VEL stores the real fractional velocity (the ones after the first step computation) } } } } this->CheckVelocityConvergence(NormV); } void RecoverFractionalVelocity() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL); if (mDomainSize > 2) { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { it_node->FastGetSolutionStepValue(VELOCITY_X) = it_node->FastGetSolutionStepValue(FRACT_VEL_X); } if (!it_node->IsFixed(VELOCITY_Y)) { it_node->FastGetSolutionStepValue(VELOCITY_Y) = it_node->FastGetSolutionStepValue(FRACT_VEL_Y); } if (!it_node->IsFixed(VELOCITY_Z)) { it_node->FastGetSolutionStepValue(VELOCITY_Z) = it_node->FastGetSolutionStepValue(FRACT_VEL_Z); } } } } else { #pragma omp parallel for for (int i_node = 0; i_node < n_nodes; ++i_node) { auto it_node = rModelPart.NodesBegin() + i_node; if (it_node->IsNot(ISOLATED)) { if (!it_node->IsFixed(VELOCITY_X)) { it_node->FastGetSolutionStepValue(VELOCITY_X) = it_node->FastGetSolutionStepValue(FRACT_VEL_X); } if (!it_node->IsFixed(VELOCITY_Y)) { it_node->FastGetSolutionStepValue(VELOCITY_Y) = 
it_node->FastGetSolutionStepValue(FRACT_VEL_Y); } } } } } bool CheckVelocityIncrementConvergence(const double NormDv, double &NormV) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); NormV = 0.00; double errorNormDv = 0; double temp_norm = NormV; #pragma omp parallel for reduction(+ : temp_norm) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { temp_norm += r_vel[d] * r_vel[d]; } } NormV = temp_norm; NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); const double zero_tol = 1.0e-12; errorNormDv = (NormV < zero_tol) ? NormDv : NormDv / NormV; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << "The norm of velocity increment is: " << NormDv << std::endl; std::cout << "The norm of velocity is: " << NormV << std::endl; std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl; } if (errorNormDv < mVelocityTolerance) { std::cout << "The norm of velocity is: " << NormV << " The norm of velocity increment is: " << NormDv << " Velocity error: " << errorNormDv << " Converged!" << std::endl; return true; } else { std::cout << "The norm of velocity is: " << NormV << " The norm of velocity increment is: " << NormDv << " Velocity error: " << errorNormDv << " Not converged!" 
<< std::endl; return false; } } void CheckVelocityConvergence(const double NormOldV) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormV = 0.00; #pragma omp parallel for reduction(+ \ : NormV) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const auto &r_vel = it_node->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) { NormV += r_vel[d] * r_vel[d]; } } NormV = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV); NormV = sqrt(NormV); std::cout << "The norm of velocity is: " << NormV << " Old velocity norm was: " << NormOldV << std::endl; } bool CheckPressureIncrementConvergence(const double NormDp) { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); double NormP = 0.00; double errorNormDp = 0; #pragma omp parallel for reduction(+ \ : NormP) for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; const double Pr = it_node->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } NormP = BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP); NormP = sqrt(NormP); const double zero_tol = 1.0e-12; errorNormDp = (NormP < zero_tol) ? NormDp : NormDp / NormP; if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) { std::cout << " The norm of pressure increment is: " << NormDp << std::endl; std::cout << " The norm of pressure is: " << NormP << std::endl; std::cout << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << std::endl; } if (errorNormDp < mPressureTolerance) { std::cout << " The norm of pressure is: " << NormP << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << " Converged!" 
<< std::endl; return true; } else { std::cout << " The norm of pressure is: " << NormP << " The norm of pressure increment is: " << NormDp << " Pressure error: " << errorNormDp << " Not converged!" << std::endl; return false; } } void FixPressure() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; // if (it_node->Is(RIGID) && (it_node->X() < 0.001 || it_node->X() > 0.999)) // for closed domain case with analytical solution if (it_node->Is(FREE_SURFACE)) { it_node->FastGetSolutionStepValue(PRESSURE) = 0; it_node->Fix(PRESSURE); } } } void FreePressure() { ModelPart &rModelPart = BaseType::GetModelPart(); const int n_nodes = rModelPart.NumberOfNodes(); for (int i_node = 0; i_node < n_nodes; ++i_node) { const auto it_node = rModelPart.NodesBegin() + i_node; it_node->Free(PRESSURE); } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ double mVelocityTolerance; double mPressureTolerance; unsigned int mMaxPressureIter; unsigned int mDomainSize; unsigned int mTimeOrder; bool mReformDofSet; // Fractional step index. /* 1 : Momentum step (calculate fractional step velocity) * 2-3 : Unused (reserved for componentwise calculation of frac step velocity) * 4 : Pressure step * 5 : Computation of projections * 6 : End of step velocity */ // unsigned int mStepId; /// Scheme for the solution of the momentum equation StrategyPointerType mpMomentumStrategy; /// Scheme for the solution of the mass equation StrategyPointerType mpPressureStrategy; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. 
ThreeStepVPStrategy &operator=(ThreeStepVPStrategy const &rOther) {} /// Copy constructor. ThreeStepVPStrategy(ThreeStepVPStrategy const &rOther) {} ///@} }; /// Class ThreeStepVPStrategy ///@} ///@name Type Definitions ///@{ ///@} ///@} // addtogroup } // namespace Kratos. #endif // KRATOS_THREE_STEP_V_P_STRATEGY_H
GB_unop__identity_fp64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_fc64) // op(A') function: GB (_unop_tran__identity_fp64_fc64) // C type: double // A type: GxB_FC64_t // cast: double cij = (double) creal (aij) // unaryop: cij = aij #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) creal (aij) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) creal (aij) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_fc64) ( double *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) 
{ #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; double z = (double) creal (aij) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; double z = (double) creal (aij) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_fc64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
%  intensely near image edges and more intensely far from edges. We blur the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
%  The format of the AdaptiveBlurImage method is:
%
%      Image *AdaptiveBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma means no blurring at all: return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The resulting edge_image intensity selects, per pixel, how wide a blur
    kernel to apply below (strong edge => small kernel => less blurring).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices i are populated; kernel[i] is a (width-i) x (width-i)
    normalized Gaussian.  Odd slots stay NULL (the lookup index j is always
    forced even below).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Fold any normalization residue into the center tap so weights sum to 1.
    */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        Partial allocation failure: release everything acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image: each row is processed independently in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge intensity to a kernel index: bright (strong) edges give a
        large j, i.e. a small (width-j) kernel; j is clamped and forced even
        to hit a populated kernel slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveBlurImage)
#endif
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e S h a r p e n I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
%  intensely near image edges and less intensely
far from edges. We sharpen the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
%  The format of the AdaptiveSharpenImage method is:
%
%      Image *AdaptiveSharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma means no sharpening at all: return the clone unchanged.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The edge map's intensity selects, per pixel, how wide a sharpening kernel
    to apply (strong edge => large kernel => more sharpening).
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.
    Only even indices i are populated; kernel[i] is a (width-i) x (width-i)
    negative Gaussian with a dominant positive center (unsharp style).
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Center tap is -2*sum(negative taps), i.e. positive, so the kernel sums
      to -normalize > 0 and sharpens rather than blurs.
    */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        Partial allocation failure: release everything acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image: each row is processed independently in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Map edge intensity to a kernel index; j is clamped and forced even to
        hit a populated kernel slot.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveSharpenImage)
#endif
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlurImage() blurs an image.
We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel 
to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConvolveImage(const Image *image, const KernelInfo *kernel_info,ExceptionInfo *exception) { Image *convolve_image; #if defined(MAGICKCORE_OPENCL_SUPPORT) convolve_image=AccelerateConvolveImage(image,kernel_info,exception); if (convolve_image != (Image *) NULL) return(convolve_image); #endif convolve_image=MorphologyImage(image,ConvolveMorphology,1,kernel_info, exception); return(convolve_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s p e c k l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DespeckleImage() reduces the speckle noise in an image while perserving the % edges of the original image. A speckle removing filter uses a complementary % hulling technique (raising pixels that are darker than their surrounding % neighbors, then complementarily lowering pixels that are brighter than their % surrounding neighbors) to reduce the speckle index of that image (reference % Crimmins speckle removal). % % The format of the DespeckleImage method is: % % Image *DespeckleImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  Hull() performs one hulling pass of the Crimmins speckle-removal algorithm
  in direction (x_offset,y_offset) over a (columns+2) x (rows+2) padded
  buffer pair: f holds the working pixels, g receives/propagates the
  intermediate result.  polarity > 0 raises dark pixels toward bright
  neighbors; polarity <= 0 lowers bright pixels toward dark neighbors.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    Skip the one-pixel top padding row; r is the neighbor in the hull
    direction.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /*
      i indexes row y inside the padded buffer (1-pixel border each side).
    */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare against both the forward (r) and backward (s)
    neighbors of the intermediate result and write back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*(columns+2)+x_offset);
  s=q-(y_offset*(columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /*
    The 8 hull directions (each X/Y pair is also applied negated below).
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer: one-pixel padding border on every side.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image, one channel at a time.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Copy this channel into the padded pixels buffer; j walks the padded
      layout (the extra j++ on each side skips the border columns).
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /*
      Crimmins: hull in all 8 directions with both polarities.
    */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Copy the despeckled channel back out of the padded buffer.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *EdgeImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *edge_image; KernelInfo *kernel_info; register ssize_t i; size_t width; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,0.5); kernel_info=AcquireKernelInfo((const char *) NULL,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); (void) memset(kernel_info,0,sizeof(*kernel_info)); kernel_info->width=width; kernel_info->height=width; kernel_info->x=(ssize_t) (kernel_info->width-1)/2; kernel_info->y=(ssize_t) (kernel_info->height-1)/2; kernel_info->signature=MagickCoreSignature; kernel_info->values=(MagickRealType *) MagickAssumeAligned( AcquireAlignedMemory(kernel_info->width,kernel_info->height* sizeof(*kernel_info->values))); if (kernel_info->values == (MagickRealType *) NULL) { kernel_info=DestroyKernelInfo(kernel_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++) kernel_info->values[i]=(-1.0); kernel_info->values[i/2]=(double) kernel_info->width*kernel_info->height-1.0; edge_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(edge_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E m b o s s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % EmbossImage() returns a grayscale image with a three-dimensional effect. % We convolve the image with a Gaussian operator of the given radius and % standard deviation (sigma). 
For reasonable results, radius should be
%  larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
%  radius for you.
%
%  The format of the EmbossImage method is:
%
%      Image *EmbossImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build a directional Gaussian: taps on the anti-diagonal (u == k) keep a
    signed Gaussian weight (negative above/left of center, positive
    below/right); everything else is zeroed.  The sign flip across the
    diagonal is what produces the embossed "lighting" effect.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ? -8.0 :
        8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the kernel weights sum to 1 (PerceptibleReciprocal guards
    against a zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *GaussianBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g", radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % K u w a h a r a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % KuwaharaImage() is an edge preserving noise reduction filter. % % The format of the KuwaharaImage method is: % % Image *KuwaharaImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the square window radius. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  GetMeanLuma() returns the Rec. 709 luma of the given mean-pixel vector,
  using the image's channel map to locate the R, G and B components.
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Each of the four quadrants examined per pixel is width x width pixels.
  */
  width=(size_t) radius+1;
  /*
    Work on a Gaussian-smoothed copy; the statistics below read this image.
  */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        /*
          Position the quadrant relative to (x,y): 0=NW, 1=NE, 2=SW, 3=SE.
        */
        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /*
          Per-channel mean of the quadrant.
        */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /*
          Luma variance of the quadrant about its mean.
        */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        /*
          Keep the most homogeneous (lowest-variance) quadrant.
        */
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /*
            A quadrant read failed above; abandon this row.
          */
          status=MagickFalse;
          break;
        }
      /*
        Paint the output pixel by sampling the smoothed image at the center
        of the winning quadrant.
      */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_KuwaharaImage)
#endif
        proceed=SetImageProgress(image,KuwaharaImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
% % o strength: the strength of the blur mask in percentage. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *LocalContrastImage(const Image *image,const double radius, const double strength,ExceptionInfo *exception) { #define LocalContrastImageTag "LocalContrast/Image" CacheView *image_view, *contrast_view; float *interImage, *scanLinePixels, totalWeight; Image *contrast_image; MagickBooleanType status; MemoryInfo *scanLinePixels_info, *interImage_info; ssize_t scanLineSize, width; /* Initialize contrast image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception); if (contrast_image != (Image *) NULL) return(contrast_image); #endif contrast_image=CloneImage(image,0,0,MagickTrue,exception); if (contrast_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse) { contrast_image=DestroyImage(contrast_image); return((Image *) NULL); } image_view=AcquireVirtualCacheView(image,exception); contrast_view=AcquireAuthenticCacheView(contrast_image,exception); scanLineSize=(ssize_t) MagickMax(image->columns,image->rows); width=(ssize_t) scanLineSize*0.002f*fabs(radius); scanLineSize+=(2*width); scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()* scanLineSize,sizeof(*scanLinePixels)); if (scanLinePixels_info == (MemoryInfo *) NULL) { contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } scanLinePixels=(float *) 
GetVirtualMemoryBlob(scanLinePixels_info); /* Create intermediate buffer. */ interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)), sizeof(*interImage)); if (interImage_info == (MemoryInfo *) NULL) { scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); contrast_image=DestroyImage(contrast_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } interImage=(float *) GetVirtualMemoryBlob(interImage_info); totalWeight=(float) ((width+1)*(width+1)); /* Vertical pass. */ status=MagickTrue; { ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *out, *pix, *pixels; register ssize_t y; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; pix=pixels; p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width), exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (y=0; y < (ssize_t) image->rows+(2*width); y++) { *pix++=(float)GetPixelLuma(image,p); p+=image->number_channels; } out=interImage+x+width; for (y=0; y < (ssize_t) image->rows; y++) { float sum, weight; weight=1.0f; sum=0; pix=pixels+y; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* write to output */ *out=sum/totalWeight; /* mirror into padding */ if (x <= width && x != 0) *(out-(x*2))=*out; if ((x > (ssize_t) image->columns-width-2) && (x != (ssize_t) image->columns-1)) *(out+((image->columns-x-1)*2))=*out; out+=image->columns+(width*2); } } } /* Horizontal pass. 
*/ { ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; float *pix, *pixels; register Quantum *magick_restrict q; register ssize_t x; ssize_t i; if (status == MagickFalse) continue; pixels=scanLinePixels; pixels+=id*scanLineSize; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+ (2*width))*sizeof(float)); for (x=0; x < (ssize_t) image->columns; x++) { float mult, srcVal, sum, weight; PixelTrait traits; weight=1.0f; sum=0; pix=pixels+x; for (i=0; i < width; i++) { sum+=weight*(*pix++); weight+=1.0f; } for (i=width+1; i < (2*width); i++) { sum+=weight*(*pix++); weight-=1.0f; } /* Apply and write */ srcVal=(float) GetPixelLuma(image,p); mult=(srcVal-(sum/totalWeight))*(strength/100.0f); mult=(srcVal+mult)/srcVal; traits=GetPixelChannelTraits(image,RedPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult), q); traits=GetPixelChannelTraits(image,GreenPixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)* mult),q); traits=GetPixelChannelTraits(image,BluePixelChannel); if ((traits & UpdatePixelTrait) != 0) SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)* mult),q); p+=image->number_channels; q+=contrast_image->number_channels; } if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse) status=MagickFalse; } } scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info); interImage_info=RelinquishVirtualMemory(interImage_info); 
contrast_view=DestroyCacheView(contrast_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) contrast_image=DestroyImage(contrast_image); return(contrast_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o t i o n B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MotionBlurImage() simulates motion blur. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). % For reasonable results, radius should be larger than sigma. Use a % radius of 0 and MotionBlurImage() selects a suitable radius for you. % Angle gives the angle of the blurring motion. % % Andrew Protano contributed this effect. % % The format of the MotionBlurImage method is: % % Image *MotionBlurImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting % the center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: Apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ static MagickRealType *GetMotionBlurKernel(const size_t width, const double sigma) { MagickRealType *kernel, normalize; register ssize_t i; /* Generate a 1-D convolution kernel. 
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) return(kernel); normalize=0.0; for (i=0; i < (ssize_t) width; i++) { kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma* MagickSigma)))/(MagickSQ2PI*MagickSigma)); normalize+=kernel[i]; } for (i=0; i < (ssize_t) width; i++) kernel[i]/=normalize; return(kernel); } MagickExport Image *MotionBlurImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { #define BlurImageTag "Blur/Image" CacheView *blur_view, *image_view, *motion_view; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; OffsetInfo *offset; PointInfo point; register ssize_t i; size_t width; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); width=GetOptimalKernelWidth1D(radius,sigma); kernel=GetMotionBlurKernel(width,sigma); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset)); if (offset == (OffsetInfo *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } point.x=(double) width*sin(DegreesToRadians(angle)); point.y=(double) width*cos(DegreesToRadians(angle)); for (i=0; i < (ssize_t) width; i++) { offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5); offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5); } /* Motion blur image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception); if (blur_image != (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return(blur_image); } #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); return((Image *) NULL); } if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); blur_image=DestroyImage(blur_image); return((Image *) NULL); } status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); motion_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register MagickRealType *magick_restrict k; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); 
blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } k=kernel; pixel=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+ offset[j].y,1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=(*k)*r[i]; k++; } SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q); continue; } alpha=0.0; gamma=0.0; for (j=0; j < (ssize_t) width; j++) { r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1, 1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=(*k)*alpha*r[i]; gamma+=(*k)*alpha; k++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MotionBlurImage) #endif proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); motion_view=DestroyCacheView(motion_view); image_view=DestroyCacheView(image_view); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); offset=(OffsetInfo *) RelinquishMagickMemory(offset); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P r e v i e w I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PreviewImage() tiles 9 thumbnails of the specified image with an image % processing operation applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; extern const char DefaultTileFrame[]; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; register ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; geometry.width=(size_t) (2*i+2); geometry.height=(size_t) (2*i+2); geometry.x=(i-1)/2; geometry.y=(i-1)/2; (void) RaiseImage(preview_image,&geometry,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double) geometry.height,(double) geometry.x,(double) geometry.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) 
SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { 
preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
  */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%      Image *RotationalBlurImage(const Image *image,const double angle,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,   /* precomputed cosines of the sample angles */
    offset,
    *sin_theta,   /* precomputed sines of the sample angles */
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Rotate samples about the image center; the number of samples n grows with
    the blur angle and the distance from the center to a corner.
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,
    sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* Center the sample angles about zero so the blur is symmetric. */
  offset=theta*(double) (n-1)/2.0;
  for (i=0; i < (ssize_t) n; i++)
  {
    cos_theta[i]=cos((double) (theta*i-offset));
    sin_theta[i]=sin((double) (theta*i-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      register ssize_t
        i;

      size_t
        step;

      /*
        Pixels nearer the center are blurred less: widen the sampling step as
        the radius shrinks, clamped to [1, n-1].
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        /* No alpha channel (or this IS the alpha channel): simple average. */
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) ==
             UndefinedPixelTrait) || (channel == AlphaPixelChannel))
          {
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
                center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
                (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
                1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* Alpha present: weight each rotated sample by its alpha. */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RotationalBlurImage)
#endif
        proceed=SetImageProgress(image,BlurImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
% It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. % % The format of the SelectiveBlurImage method is: % % Image *SelectiveBlurImage(const Image *image,const double radius, % const double sigma,const double threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o threshold: only pixels within this contrast threshold are included % in the blur operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SelectiveBlurImage(const Image *image,const double radius, const double sigma,const double threshold,ExceptionInfo *exception) { #define SelectiveBlurImageTag "SelectiveBlur/Image" CacheView *blur_view, *image_view, *luminance_view; Image *blur_image, *luminance_image; MagickBooleanType status; MagickOffsetType progress; MagickRealType *kernel; register ssize_t i; size_t width; ssize_t center, j, u, v, y; /* Initialize blur image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); width=GetOptimalKernelWidth1D(radius,sigma); kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,width*sizeof(*kernel))); if (kernel == (MagickRealType *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); j=(ssize_t) (width-1)/2; i=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; register const MagickRealType *k; ssize_t u, v; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double) width); message=AcquireString(""); k=kernel; for (v=0; v < (ssize_t) width; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < (ssize_t) width; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double) *k++); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } luminance_image=CloneImage(image,0,0,MagickTrue,exception); if (luminance_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); 
return((Image *) NULL); } status=TransformImageColorspace(luminance_image,GRAYColorspace,exception); if (status == MagickFalse) { luminance_image=DestroyImage(luminance_image); blur_image=DestroyImage(blur_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); return((Image *) NULL); } /* Threshold blur image. */ status=MagickTrue; progress=0; center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)* ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L)); image_view=AcquireVirtualCacheView(image,exception); luminance_view=AcquireVirtualCacheView(luminance_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double contrast; MagickBooleanType sync; register const Quantum *magick_restrict l, *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t) ((width-1)/2L),image->columns+width,width,exception); l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y- (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double intensity; register ssize_t i; intensity=GetPixelIntensity(image,p+center); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const MagickRealType *magick_restrict k; register const Quantum *magick_restrict luminance_pixels, *magick_restrict pixels; register ssize_t u; ssize_t v; 
channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel; pixel=0.0; pixels=p; luminance_pixels=l; gamma=0.0; if ((blur_traits & BlendPixelTrait) == 0) { for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(luminance_image,luminance_pixels)- intensity; if (fabs(contrast) < threshold) { pixel+=(*k)*pixels[i]; gamma+=(*k); } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (v=0; v < (ssize_t) width; v++) { for (u=0; u < (ssize_t) width; u++) { contrast=GetPixelIntensity(image,pixels)-intensity; if (fabs(contrast) < threshold) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; } k++; pixels+=GetPixelChannels(image); luminance_pixels+=GetPixelChannels(luminance_image); } pixels+=GetPixelChannels(image)*image->columns; luminance_pixels+=GetPixelChannels(luminance_image)* luminance_image->columns; } if (fabs((double) gamma) < MagickEpsilon) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); l+=GetPixelChannels(luminance_image); q+=GetPixelChannels(blur_image); } sync=SyncCacheViewAuthenticPixels(blur_view,exception); if (sync == MagickFalse) 
status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SelectiveBlurImage) #endif proceed=SetImageProgress(image,SelectiveBlurImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); luminance_image=DestroyImage(luminance_image); kernel=(MagickRealType *) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadeImage() shines a distant light on an image to create a % three-dimensional effect. You control the positioning of the light with % azimuth and elevation; azimuth is measured in degrees off the x axis % and elevation is measured in pixels above the Z axis. % % The format of the ShadeImage method is: % % Image *ShadeImage(const Image *image,const MagickBooleanType gray, % const double azimuth,const double elevation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o gray: A value other than zero shades the intensity of each pixel. % % o azimuth, elevation: Define the light source direction. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray, const double azimuth,const double elevation,ExceptionInfo *exception) { #define ShadeImageTag "Shade/Image" CacheView *image_view, *shade_view; Image *linear_image, *shade_image; MagickBooleanType status; MagickOffsetType progress; PrimaryInfo light; ssize_t y; /* Initialize shaded image attributes. 
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* One clone supplies the height field (read 3 rows at a time), the other
     receives the shaded result. */
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (rows y-1..y+1) with a one-pixel border. */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      /* Sobel-style intensity gradients over the 3x3 neighborhood. */
      normal.x=(double) (
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetPixelIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetPixelIntensity(linear_image,post)+
        GetPixelIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetPixelIntensity(linear_image,pre)-
        GetPixelIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface: lit by the light's Z component only */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* gray: write the raw shade value, discarding the color. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* otherwise modulate the original channel by the shade factor */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadeImage)
#endif
        proceed=SetImageProgress(image,ShadeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a (width x width) negated-Gaussian sharpening kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  /* Fill with negative Gaussian weights, accumulating their sum. */
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /* i == width*width here, so i/2 indexes the center tap (width is odd);
     overwrite it so the kernel's total weight is positive. */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Rescale so all taps sum to 1 (no overall brightness change). */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: interpolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
*/ status=MagickTrue; progress=0; width=GetOptimalKernelWidth1D(radius,0.5); random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); spread_view=AcquireAuthenticCacheView(spread_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,spread_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PointInfo point; point.x=GetPseudoRandomValue(random_info[id]); point.y=GetPseudoRandomValue(random_info[id]); status=InterpolatePixelChannels(image,image_view,spread_image,method, (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q, exception); if (status == MagickFalse) break; q+=GetPixelChannels(spread_image); } if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SpreadImage) #endif proceed=SetImageProgress(image,SpreadImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } spread_view=DestroyCacheView(spread_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) spread_image=DestroyImage(spread_image); return(spread_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n s h a r p M a s k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 
% % UnsharpMaskImage() sharpens one or more image channels. We convolve the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and UnsharpMaskImage() selects a suitable radius for you. % % The format of the UnsharpMaskImage method is: % % Image *UnsharpMaskImage(const Image *image,const double radius, % const double sigma,const double amount,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o gain: the percentage of the difference between the original and the % blur image that is added back into the original. % % o threshold: the threshold in pixels needed to apply the diffence gain. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *UnsharpMaskImage(const Image *image,const double radius, const double sigma,const double gain,const double threshold, ExceptionInfo *exception) { #define SharpenImageTag "Sharpen/Image" CacheView *image_view, *unsharp_view; Image *unsharp_image; MagickBooleanType status; MagickOffsetType progress; double quantum_threshold; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold, exception); if (unsharp_image != (Image *) NULL) return(unsharp_image); #endif unsharp_image=BlurImage(image,radius,sigma,exception); if (unsharp_image == (Image *) NULL) return((Image *) NULL); quantum_threshold=(double) QuantumRange*threshold; /* Unsharp-mask image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,unsharp_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelChannel channel; PixelTrait traits, unsharp_traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); unsharp_traits=GetPixelChannelTraits(unsharp_image,channel); if ((traits == UndefinedPixelTrait) || (unsharp_traits == UndefinedPixelTrait)) continue; if ((unsharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(unsharp_image,channel,p[i],q); continue; } pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q); if (fabs(2.0*pixel) < quantum_threshold) pixel=(double) p[i]; else pixel=(double) p[i]+gain*pixel; SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(unsharp_image); } if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_UnsharpMaskImage) #endif proceed=SetImageProgress(image,SharpenImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } 
unsharp_image->type=image->type; unsharp_view=DestroyCacheView(unsharp_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) unsharp_image=DestroyImage(unsharp_image); return(unsharp_image); }
ZQ_CNN_MTCNN_ncnn.h
#ifndef _ZQ_CNN_MTCNN_H_ #define _ZQ_CNN_MTCNN_H_ #pragma once #include "net.h" #include <algorithm> #include <omp.h> #ifndef __max #define __max(x,y) ((x>y)?(x):(y)) #endif #ifndef __min #define __min(x,y) ((x<y)?(x):(y)) #endif namespace ZQ { class ZQ_CNN_MTCNN_ncnn { public: class ZQ_CNN_BBox { public: float score; int row1; int col1; int row2; int col2; float area; bool exist; bool need_check_overlap_count; float ppoint[10]; float regreCoord[4]; ZQ_CNN_BBox() { memset(this, 0, sizeof(ZQ_CNN_BBox)); } ~ZQ_CNN_BBox() {} bool ReadFromBinary(FILE* in) { if (fread(this, sizeof(ZQ_CNN_BBox), 1, in) != 1) return false; return true; } bool WriteBinary(FILE* out) const { if (fwrite(this, sizeof(ZQ_CNN_BBox), 1, out) != 1) return false; return true; } }; class ZQ_CNN_BBox106 { public: float score; int row1; int col1; int row2; int col2; float area; bool exist; bool need_check_overlap_count; float ppoint[212]; float regreCoord[4]; ZQ_CNN_BBox106() { memset(this, 0, sizeof(ZQ_CNN_BBox106)); } ~ZQ_CNN_BBox106() {} bool ReadFromBinary(FILE* in) { if (fread(this, sizeof(ZQ_CNN_BBox106), 1, in) != 1) return false; return true; } bool WriteBinary(FILE* out) const { if (fwrite(this, sizeof(ZQ_CNN_BBox106), 1, out) != 1) return false; return true; } }; class ZQ_CNN_OrderScore { public: float score; int oriOrder; ZQ_CNN_OrderScore() { memset(this, 0, sizeof(ZQ_CNN_OrderScore)); } }; static bool _cmp_score(const ZQ_CNN_OrderScore& lsh, const ZQ_CNN_OrderScore& rsh) { return lsh.score < rsh.score; } static void _nms(std::vector<ZQ_CNN_BBox> &boundingBox, std::vector<ZQ_CNN_OrderScore> &bboxScore, const float overlap_threshold, const std::string& modelname = "Union", int overlap_count_thresh = 0) { if (boundingBox.empty() || overlap_threshold >= 1.0) { return; } std::vector<int> heros; std::vector<int> overlap_num; //sort the score sort(bboxScore.begin(), bboxScore.end(), _cmp_score); int order = 0; float IOU = 0; float maxX = 0; float maxY = 0; float minX = 0; float minY = 0; while 
(bboxScore.size() > 0)
            {
                // Pop the current best score; oriOrder == -1 marks entries
                // whose box was already suppressed below.
                order = bboxScore.back().oriOrder;
                bboxScore.pop_back();
                if (order < 0)continue;
                heros.push_back(order);
                int cur_overlap = 0;
                boundingBox[order].exist = false;//delete it
                int box_num = boundingBox.size();
                for (int num = 0; num < box_num; num++)
                {
                    if (boundingBox[num].exist)
                    {
                        //the iou
                        maxY = __max(boundingBox[num].row1, boundingBox[order].row1);
                        maxX = __max(boundingBox[num].col1, boundingBox[order].col1);
                        minY = __min(boundingBox[num].row2, boundingBox[order].row2);
                        minX = __min(boundingBox[num].col2, boundingBox[order].col2);
                        //maxX1 and maxY1 reuse
                        maxX = __max(minX - maxX + 1, 0);
                        maxY = __max(minY - maxY + 1, 0);
                        //IOU reuse for the area of two bbox
                        IOU = maxX * maxY;
                        float area1 = boundingBox[num].area;
                        float area2 = boundingBox[order].area;
                        if (!modelname.compare("Union"))
                            IOU = IOU / (area1 + area2 - IOU);
                        else if (!modelname.compare("Min"))
                        {
                            IOU = IOU / __min(area1, area2);
                        }
                        if (IOU > overlap_threshold)
                        {
                            // Suppress this box and invalidate its pending
                            // score entry so it is skipped when popped.
                            cur_overlap++;
                            boundingBox[num].exist = false;
                            for (std::vector<ZQ_CNN_OrderScore>::iterator it = bboxScore.begin(); it != bboxScore.end(); it++)
                            {
                                if ((*it).oriOrder == num)
                                {
                                    (*it).oriOrder = -1;
                                    break;
                                }
                            }
                        }
                    }
                }
                overlap_num.push_back(cur_overlap);
            }
            // A kept box survives only if it suppressed enough neighbours
            // (a crude confidence vote), unless it is exempt from the check.
            for (int i = 0; i < heros.size(); i++)
            {
                if (!boundingBox[heros[i]].need_check_overlap_count || overlap_num[i] >= overlap_count_thresh)
                    boundingBox[heros[i]].exist = true;
            }
            //clear exist= false;
            for (int i = boundingBox.size() - 1; i >= 0; i--)
            {
                if (!boundingBox[i].exist)
                {
                    boundingBox.erase(boundingBox.begin() + i);
                }
            }
        }
        // Apply each surviving box's regression offsets (regreCoord scaled by
        // the box size) and optionally expand the result to a square centred
        // on the regressed box; recomputes area.  The boundary clamp is
        // intentionally commented out (width/height are currently unused).
        static void _refine_and_square_bbox(std::vector<ZQ_CNN_BBox> &vecBbox, const int width, const int height, bool square = true)
        {
            float bbw = 0, bbh = 0, bboxSize = 0;
            float h = 0, w = 0;
            float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
            for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++)
            {
                if ((*it).exist)
                {
                    bbh = (*it).row2 - (*it).row1 + 1;
                    bbw = (*it).col2 - (*it).col1 + 1;
                    // Regression offsets are relative to the box size:
                    // regreCoord = {dx1, dy1, dx2, dy2}.
                    y1 = (*it).row1 + (*it).regreCoord[1] * bbh;
                    x1 = (*it).col1 + (*it).regreCoord[0] * bbw;
                    y2 = (*it).row2 + (*it).regreCoord[3] * bbh;
                    x2 = (*it).col2 + (*it).regreCoord[2] * bbw;
                    w = x2 - x1 + 1;
                    h = y2 - y1 + 1;
                    if (square)
                    {
                        // Grow the shorter side so the box becomes a square
                        // with the same centre.
                        bboxSize = (h > w) ? h : w;
                        y1 = y1 + h*0.5 - bboxSize*0.5;
                        x1 = x1 + w*0.5 - bboxSize*0.5;
                        (*it).row2 = round(y1 + bboxSize - 1);
                        (*it).col2 = round(x1 + bboxSize - 1);
                        (*it).row1 = round(y1);
                        (*it).col1 = round(x1);
                    }
                    else
                    {
                        (*it).row2 = round(y1 + h - 1);
                        (*it).col2 = round(x1 + w - 1);
                        (*it).row1 = round(y1);
                        (*it).col1 = round(x1);
                    }
                    //boundary check
                    /*if ((*it).row1 < 0)(*it).row1 = 0;
                    if ((*it).col1 < 0)(*it).col1 = 0;
                    if ((*it).row2 > height)(*it).row2 = height - 1;
                    if ((*it).col2 > width)(*it).col2 = width - 1;*/
                    it->area = (it->row2 - it->row1)*(it->col2 - it->col1);
                }
            }
        }
        // Expand each surviving box to a square with the same centre (no
        // regression applied); recomputes area.  The boundary clamp is
        // intentionally commented out (width/height are currently unused).
        static void _square_bbox(std::vector<ZQ_CNN_BBox> &vecBbox, const int width, const int height)
        {
            float bbw = 0, bbh = 0, bboxSize = 0;
            float h = 0, w = 0;
            float x1 = 0, y1 = 0, x2 = 0, y2 = 0;
            for (std::vector<ZQ_CNN_BBox>::iterator it = vecBbox.begin(); it != vecBbox.end(); it++)
            {
                if ((*it).exist)
                {
                    y1 = (*it).row1;
                    x1 = (*it).col1;
                    h = (*it).row2 - (*it).row1 + 1;
                    w = (*it).col2 - (*it).col1 + 1;
                    bboxSize = (h > w) ? h : w;
                    y1 = y1 + h*0.5 - bboxSize*0.5;
                    x1 = x1 + w*0.5 - bboxSize*0.5;
                    (*it).row2 = round(y1 + bboxSize - 1);
                    (*it).col2 = round(x1 + bboxSize - 1);
                    (*it).row1 = round(y1);
                    (*it).col1 = round(x1);
                    //boundary check
                    /*if ((*it).row1 < 0)(*it).row1 = 0;
                    if ((*it).col1 < 0)(*it).col1 = 0;
                    if ((*it).row2 > height)(*it).row2 = height - 1;
                    if ((*it).col2 > width)(*it).col2 = width - 1;*/
                    it->area = (it->row2 - it->row1)*(it->col2 - it->col1);
                }
            }
        }
    public:
        // NOTE(review): has_lnet, thread_num, do_landmark, early_accept_thresh,
        // nms_thresh_per_scale, rnet_size/onet_size/lnet_size are NOT
        // initialized here; they are only set by Init()/SetPara().  Calling
        // Find() before both have been called reads indeterminate values.
        ZQ_CNN_MTCNN_ncnn()
        {
            min_size = 60;
            thresh[0] = 0.6; thresh[1] = 0.7; thresh[2] = 0.7;
            nms_thresh[0] = 0.6; nms_thresh[1] = 0.7; nms_thresh[2] = 0.7;
            width = 0; height = 0;
            factor = 0.709;
            pnet_overlap_thresh_count = 4;
            pnet_size = 12;
            pnet_stride = 2;
            special_handle_very_big_face = false;
            force_run_pnet_multithread = false;
            show_debug_info = false;
            limit_r_num = 0;
            limit_o_num = 0;
            limit_l_num = 0;
        }
        ~ZQ_CNN_MTCNN_ncnn()
        {
        }
    private:
        // One network replica + allocator pair per worker thread.
        std::vector<ncnn::Net> pnet, rnet, onet, lnet;
        std::vector<ncnn::UnlockedPoolAllocator> g_blob_pool_allocator;
        std::vector<ncnn::UnlockedPoolAllocator> g_workspace_pool_allocator;
        bool has_lnet;
        int thread_num;
        float thresh[3], nms_thresh[3];     // per-stage score / NMS thresholds (P, R, O)
        int min_size;                       // minimum face size in pixels
        int width, height;                  // input resolution fixed by SetPara
        float factor;                       // pyramid scale factor
        int pnet_overlap_thresh_count;
        int pnet_size;                      // P-Net window (12 by default)
        int pnet_stride;
        int rnet_size;
        int onet_size;
        int lnet_size;
        bool special_handle_very_big_face;
        bool do_landmark;
        float early_accept_thresh;
        float nms_thresh_per_scale;
        bool force_run_pnet_multithread;
        std::vector<float> scales;          // image pyramid scales
        std::vector<ncnn::Mat> pnet_images; // resized pyramid levels
        ncnn::Mat input, rnet_image, onet_image;
        bool show_debug_info;
        int limit_r_num;                    // candidate caps before R/O/L stages (0 = unlimited)
        int limit_o_num;
        int limit_l_num;
    public:
        void TurnOnShowDebugInfo() { show_debug_info = true; }
        void TurnOffShowDebugInfo() { show_debug_info = false; }
        // Cap the number of candidates fed into the R-, O- and L-Net stages.
        void SetLimit(int limit_r = 0, int limit_o = 0, int limit_l = 0)
        {
            limit_r_num = limit_r;
            limit_o_num = limit_o;
            limit_l_num = limit_l;
        }
    private:
        // Load one ncnn network from its param/model file pair.
        static bool _load(ncnn::Net& net, const std::string& param, const std::string& model)
        {
            if (-1 == net.load_param(param.c_str()))
                return false;
            if (-1 == net.load_model(model.c_str()))
                return false;
            return true;
        }
        // Crop a rectangle out of 'input' into 'output' via ncnn's
        // copy_cut_border; fails when the rectangle is empty or not fully
        // inside the image.
        static bool _roi(const ncnn::Mat& input, ncnn::Mat& output, int off_x, int off_y, int width, int height)
        {
            if (off_x >= 0 && off_y >= 0 && width > 0 && height > 0
                && off_x + width <= input.w && off_y + height <= input.h)
            {
                copy_cut_border(input, output, off_y, input.h - off_y - height, off_x, input.w - off_x - width);
                return true;
            }
            else
                return false;
        }
    public:
        // Load every stage's network, replicated once per worker thread.
        // thread_num < 1 requests the multi-threaded P-Net path with a single
        // replica.  On any load failure all networks are released and
        // this->thread_num is left at 0 (Find() will then refuse to run).
        bool Init(const std::string& pnet_param, const std::string& pnet_model, const std::string& rnet_param, const std::string& rnet_model,
            const std::string& onet_param, const std::string& onet_model, int thread_num = 1, bool has_lnet = false,
            const std::string& lnet_param = "", const std::string& lnet_model = "")
        {
            if (thread_num < 1)
                force_run_pnet_multithread = true;
            else
                force_run_pnet_multithread = false;
            thread_num = __max(1, thread_num);
            pnet.resize(thread_num);
            rnet.resize(thread_num);
            onet.resize(thread_num);
            this->has_lnet = has_lnet;
            if (has_lnet)
            {
                lnet.resize(thread_num);
            }
            g_blob_pool_allocator.resize(thread_num);
            g_workspace_pool_allocator.resize(thread_num);
            bool ret = true;
            for (int i = 0; i < thread_num; i++)
            {
                ret = _load(pnet[i], pnet_param, pnet_model)
                    && _load(rnet[i], rnet_param, rnet_model)
                    && _load(onet[i], onet_param, onet_model);
                if (has_lnet && ret)
                    ret = _load(lnet[i], lnet_param, lnet_model);
                if (!ret)
                    break;
            }
            if (!ret)
            {
                pnet.clear(); rnet.clear(); onet.clear();
                if (has_lnet) lnet.clear();
                this->thread_num = 0;
            }
            else
                this->thread_num = thread_num;
            for (int i = 0; i < thread_num; i++)
            {
                g_blob_pool_allocator[i].clear();
                g_workspace_pool_allocator[i].clear();
            }
            return ret;
        }
        // Configure detection parameters and (re)build the image-pyramid
        // scale list for a fixed input resolution w x h.
        void SetPara(int w, int h, int min_face_size = 60, float pthresh = 0.6, float rthresh = 0.7, float othresh = 0.7,
            float nms_pthresh = 0.6, float nms_rthresh = 0.7, float nms_othresh = 0.7, float scale_factor = 0.709,
            int pnet_overlap_thresh_count = 4, int pnet_size = 12, int pnet_stride = 2, bool
special_handle_very_big_face = false, bool do_landmark = true, float early_accept_thresh = 1.00) { min_size = __max(pnet_size, min_face_size); thresh[0] = __max(0.1, pthresh); thresh[1] = __max(0.1, rthresh); thresh[2] = __max(0.1, othresh); nms_thresh[0] = __max(0.1, nms_pthresh); nms_thresh[1] = __max(0.1, nms_rthresh); nms_thresh[2] = __max(0.1, nms_othresh); scale_factor = __max(0.5, __min(0.97, scale_factor)); this->pnet_overlap_thresh_count = __max(0, pnet_overlap_thresh_count); this->pnet_size = pnet_size; this->pnet_stride = pnet_stride; this->special_handle_very_big_face = special_handle_very_big_face; this->do_landmark = do_landmark; this->early_accept_thresh = early_accept_thresh; if (pnet_size == 20 && pnet_stride == 4) nms_thresh_per_scale = 0.45; else nms_thresh_per_scale = 0.495; if (width != w || height != h || factor != scale_factor) { scales.clear(); pnet_images.clear(); width = w; height = h; float minside = __min(width, height); int MIN_DET_SIZE = pnet_size; float m = (float)MIN_DET_SIZE / min_size; minside *= m; while (minside > MIN_DET_SIZE) { scales.push_back(m); minside *= factor; m *= factor; } minside = __min(width, height); int count = scales.size(); for (int i = scales.size() - 1; i >= 0; i--) { if (ceil(scales[i] * minside) <= pnet_size) { count--; } } if (special_handle_very_big_face) { if (count > 2) count--; scales.resize(count); if (count > 0) { float last_size = ceil(scales[count - 1] * minside); for (int tmp_size = last_size - 1; tmp_size >= pnet_size + 1; tmp_size -= 2) { scales.push_back((float)tmp_size / minside); count++; } } scales.push_back((float)pnet_size / minside); count++; } else { scales.push_back((float)pnet_size / minside); count++; } pnet_images.resize(count); } } bool Find(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& results) { double t1 = omp_get_wtime(); std::vector<ZQ_CNN_BBox> firstBbox, secondBbox, thirdBbox; if (!_Pnet_stage(bgr_img, _width, _height, 
_widthStep, firstBbox))
                return false;
            //results = firstBbox;
            //return true;
            // Optionally cap the candidate count before the R-Net stage.
            if (limit_r_num > 0)
            {
                _select(firstBbox, limit_r_num, _width, _height);
            }
            double t2 = omp_get_wtime();
            if (!_Rnet_stage(firstBbox, secondBbox))
                return false;
            //results = secondBbox;
            //return true;
            // Optionally cap the candidate count before the O-Net stage.
            if (limit_o_num > 0)
            {
                _select(secondBbox, limit_o_num, _width, _height);
            }
            double t3 = omp_get_wtime();
            if (!_Onet_stage(secondBbox, results))
                return false;
            double t4 = omp_get_wtime();
            if (show_debug_info)
            {
                printf("final found num: %d\n", (int)results.size());
                printf("total cost: %.3f ms (P: %.3f ms, R: %.3f ms, O: %.3f ms)\n",
                    1000 * (t4 - t1), 1000 * (t2 - t1), 1000 * (t3 - t2), 1000 * (t4 - t3));
            }
            return true;
        }
    private:
        // Run P-Net over every pyramid level on the calling thread and fill
        // maps[i] (size mapH[i] x mapW[i]) with the face-probability channel
        // of each level's score map.
        void _compute_Pnet_single_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
        {
            int scale_num = 0;
            for (int i = 0; i < scales.size(); i++)
            {
                int changedH = (int)ceil(height*scales[i]);
                int changedW = (int)ceil(width*scales[i]);
                if (changedH < pnet_size || changedW < pnet_size)
                    continue;
                scale_num++;
                // Valid sliding-window positions per axis.
                mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
                mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
            }
            maps.resize(scale_num);
            for (int i = 0; i < scale_num; i++)
            {
                maps[i].resize(mapH[i] * mapW[i]);
            }
            for (int i = 0; i < scale_num; i++)
            {
                int changedH = (int)ceil(height*scales[i]);
                int changedW = (int)ceil(width*scales[i]);
                float cur_scale_x = (float)width / changedW;
                float cur_scale_y = (float)height / changedH;
                double t10 = omp_get_wtime();
                // Scale 1 reuses 'input' directly; other levels are resized
                // into the cached pnet_images slot.
                if (scales[i] != 1)
                {
                    ncnn::resize_bilinear(input, pnet_images[i], changedW, changedH);
                }
                double t11 = omp_get_wtime();
                ncnn::Extractor ex = pnet[0].create_extractor();
                ex.set_light_mode(true);
                ex.set_blob_allocator(&g_blob_pool_allocator[0]);
                ex.set_workspace_allocator(&g_workspace_pool_allocator[0]);
                ex.set_num_threads(1);
                if (scales[i] == 1)
                    ex.input("data", input);
                else
                    ex.input("data", pnet_images[i]);
                ncnn::Mat score, location;
                ex.extract("prob1", score);
                ex.extract("conv4-2", location);
                double t12 = omp_get_wtime();
                if (show_debug_info)
                    printf("Pnet [%d]: resolution [%dx%d], resize:%.3f ms, cost:%.3f ms\n",
                        i, changedW, changedH, 1000 * (t11 - t10), 1000 * (t12 - t11));
                //score p
                // Channel 1 of prob1 holds the face probability; copy the
                // region that fits the precomputed map extents.
                float *p = score.channel(1);
                int scoreH = score.h;
                int scoreW = score.w;
                for (int row = 0; row < scoreH; row++)
                {
                    for (int col = 0; col < scoreW; col++)
                    {
                        if (row < mapH[i] && col < mapW[i])
                            maps[i][row*mapW[i] + col] = *p;
                        p++;
                    }
                }
            }
        }
        // Multi-threaded P-Net: each pyramid level is split into overlapping
        // tiles (the overlap keeps windows straddling a tile seam visible),
        // and the tiles are dispatched across thread_num extractors.
        void _compute_Pnet_multi_thread(std::vector<std::vector<float> >& maps, std::vector<int>& mapH, std::vector<int>& mapW)
        {
            // First resize every pyramid level (serially or in parallel).
            if (thread_num <= 1)
            {
                for (int i = 0; i < scales.size(); i++)
                {
                    int changedH = (int)ceil(height*scales[i]);
                    int changedW = (int)ceil(width*scales[i]);
                    if (changedH < pnet_size || changedW < pnet_size)
                        continue;
                    if (scales[i] != 1)
                    {
                        ncnn::resize_bilinear(input, pnet_images[i], changedW, changedH);
                    }
                }
            }
            else
            {
#pragma omp parallel for num_threads(thread_num)
                for (int i = 0; i < scales.size(); i++)
                {
                    int changedH = (int)ceil(height*scales[i]);
                    int changedW = (int)ceil(width*scales[i]);
                    if (changedH < pnet_size || changedW < pnet_size)
                        continue;
                    if (scales[i] != 1)
                    {
                        ncnn::resize_bilinear(input, pnet_images[i], changedW, changedH);
                    }
                }
            }
            int scale_num = 0;
            for (int i = 0; i < scales.size(); i++)
            {
                int changedH = (int)ceil(height*scales[i]);
                int changedW = (int)ceil(width*scales[i]);
                if (changedH < pnet_size || changedW < pnet_size)
                    continue;
                scale_num++;
                mapH.push_back((changedH - pnet_size) / pnet_stride + 1);
                mapW.push_back((changedW - pnet_size) / pnet_stride + 1);
            }
            maps.resize(scale_num);
            for (int i = 0; i < scale_num; i++)
            {
                maps[i].resize(mapH[i] * mapW[i]);
            }
            // Build the tile task list: one entry per (level, tile).
            std::vector<int> task_rect_off_x;
            std::vector<int> task_rect_off_y;
            std::vector<int> task_rect_width;
            std::vector<int> task_rect_height;
            std::vector<float> task_scale;
            std::vector<int> task_scale_id;
            int stride = pnet_stride;
            const int block_size = 64 * stride;
            int cellsize = pnet_size;
            // Adjacent tiles overlap by border_size pixels so that windows
            // crossing a tile seam are still evaluated.
            int border_size = cellsize - stride;
            int overlap_border_size = cellsize / stride;
            int jump_size = block_size - border_size;
            for (int i = 0; i < scales.size(); i++)
            {
                int changeH = (int)ceil(height*scales[i]);
                int changeW = (int)ceil(width*scales[i]);
                if (changeH < pnet_size || changeW < pnet_size)
                    continue;
                int block_H_num = 0;
                int block_W_num = 0;
                int start = 0;
                while (start < changeH)
                {
                    block_H_num++;
                    if (start + block_size >= changeH)
                        break;
                    start += jump_size;
                }
                start = 0;
                while (start < changeW)
                {
                    block_W_num++;
                    if (start + block_size >= changeW)
                        break;
                    start += jump_size;
                }
                for (int s = 0; s < block_H_num; s++)
                {
                    for (int t = 0; t < block_W_num; t++)
                    {
                        int rect_off_x = t * jump_size;
                        int rect_off_y = s * jump_size;
                        int rect_width = __min(changeW, rect_off_x + block_size) - rect_off_x;
                        int rect_height = __min(changeH, rect_off_y + block_size) - rect_off_y;
                        // Tiles smaller than one P-Net window are useless.
                        if (rect_width >= cellsize && rect_height >= cellsize)
                        {
                            task_rect_off_x.push_back(rect_off_x);
                            task_rect_off_y.push_back(rect_off_y);
                            task_rect_width.push_back(rect_width);
                            task_rect_height.push_back(rect_height);
                            task_scale.push_back(scales[i]);
                            task_scale_id.push_back(i);
                        }
                    }
                }
            }
            //
            int task_num = task_scale.size();
            // One scratch crop per thread, indexed by omp_get_thread_num().
            std::vector<ncnn::Mat> task_pnet_images(thread_num);
            if (thread_num <= 1)
            {
                for (int i = 0; i < task_num; i++)
                {
                    int thread_id = omp_get_thread_num();
                    int scale_id = task_scale_id[i];
                    float cur_scale = task_scale[i];
                    int i_rect_off_x = task_rect_off_x[i];
                    int i_rect_off_y = task_rect_off_y[i];
                    int i_rect_width = task_rect_width[i];
                    int i_rect_height = task_rect_height[i];
                    if (scale_id == 0 && scales[0] == 1)
                    {
                        if (!_roi(input, task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height))
                            continue;
                    }
                    else
                    {
                        if (!_roi(pnet_images[scale_id], task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height))
                            continue;
                    }
                    ncnn::Extractor ex = pnet[thread_id].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[0]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[0]);
                    ex.set_num_threads(1);
                    ex.input("data", task_pnet_images[thread_id]);
                    ncnn::Mat score, location;
                    ex.extract("prob1", score);
                    ex.extract("conv4-2", location);
                    //score p
                    float *p = score.channel(1);
                    int scoreH = score.h;
                    int scoreW = score.w;
                    ZQ_CNN_BBox bbox;
                    ZQ_CNN_OrderScore order;
                    // Scatter this tile's scores into the level's full map;
                    // the tile offset translates to a score-map offset of
                    // off/stride.
                    for (int row = 0; row < scoreH; row++)
                    {
                        for (int col = 0; col < scoreW; col++)
                        {
                            int real_row = row + i_rect_off_y / stride;
                            int real_col = col + i_rect_off_x / stride;
                            if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
                                maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
                            p++;
                        }
                    }
                }
            }
            else
            {
#pragma omp parallel for num_threads(thread_num)
                for (int i = 0; i < task_num; i++)
                {
                    int thread_id = omp_get_thread_num();
                    int scale_id = task_scale_id[i];
                    float cur_scale = task_scale[i];
                    int i_rect_off_x = task_rect_off_x[i];
                    int i_rect_off_y = task_rect_off_y[i];
                    int i_rect_width = task_rect_width[i];
                    int i_rect_height = task_rect_height[i];
                    if (scale_id == 0 && scales[0] == 1)
                    {
                        if (!_roi(input, task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height))
                            continue;
                    }
                    else
                    {
                        if (!_roi(pnet_images[scale_id], task_pnet_images[thread_id], i_rect_off_x, i_rect_off_y, i_rect_width, i_rect_height))
                            continue;
                    }
                    // Per-thread net replica and allocators: no sharing.
                    // NOTE(review): unlike the serial branch this does not
                    // call ex.set_num_threads(1) -- confirm whether that is
                    // intentional.
                    ncnn::Extractor ex = pnet[thread_id].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[thread_id]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[thread_id]);
                    ex.input("data", task_pnet_images[thread_id]);
                    ncnn::Mat score, location;
                    ex.extract("prob1", score);
                    ex.extract("conv4-2", location);
                    //score p
                    float *p = score.channel(1);
                    int scoreH = score.h;
                    int scoreW = score.w;
                    ZQ_CNN_BBox bbox;
                    ZQ_CNN_OrderScore order;
                    for (int row = 0; row < scoreH; row++)
                    {
                        for (int col = 0; col < scoreW; col++)
                        {
                            int real_row = row + i_rect_off_y / stride;
                            int real_col = col + i_rect_off_x / stride;
                            if (real_row < mapH[scale_id] && real_col < mapW[scale_id])
                                maps[scale_id][real_row*mapW[scale_id] + real_col] = *p;
                            p++;
                        }
                    }
                }
            }
        }
        // P-Net stage: build score maps for every pyramid level, threshold
        // them into candidate windows, NMS per level, then map back to image
        // coordinates and do a cross-level NMS + square refinement.
        bool _Pnet_stage(const unsigned char* bgr_img, int _width, int _height, int _widthStep, std::vector<ZQ_CNN_BBox>& firstBbox)
        {
            // thread_num == 0 means Init() failed (or was never called).
            if (thread_num <= 0)
                return false;
            double t1 = omp_get_wtime();
            firstBbox.clear();
            // The pyramid was precomputed for a fixed resolution in SetPara.
            if (width != _width || height != _height)
                return false;
            input = ncnn::Mat::from_pixels(bgr_img, ncnn::Mat::PIXEL_BGR, _width, _height);
            // Normalize to roughly [-1, 1]: (pixel - 127.5) / 128.
            float mean_vals[3] = { 127.5,127.5,127.5 };
            float norm_vals[3] = { 1.0 / 128,1.0 / 128,1.0 / 128 };
            input.substract_mean_normalize(mean_vals, norm_vals);
            double t2 = omp_get_wtime();
            if (show_debug_info)
                printf("convert cost: %.3f ms\n", 1000 * (t2 - t1));
            std::vector<std::vector<float> > maps;
            std::vector<int> mapH;
            std::vector<int> mapW;
            if (thread_num == 1 && !force_run_pnet_multithread)
            {
                _compute_Pnet_single_thread(maps, mapH, mapW);
            }
            else
            {
                _compute_Pnet_multi_thread(maps, mapH, mapW);
            }
            ZQ_CNN_OrderScore order;
            std::vector<std::vector<ZQ_CNN_BBox> > bounding_boxes(scales.size());
            std::vector<std::vector<ZQ_CNN_OrderScore> > bounding_scores(scales.size());
            const int block_size = 32;
            int stride = pnet_stride;
            int cellsize = pnet_size;
            int border_size = cellsize / stride;
            for (int i = 0; i < maps.size(); i++)
            {
                double t13 = omp_get_wtime();
                int changedH = (int)ceil(height*scales[i]);
                int changedW = (int)ceil(width*scales[i]);
                if (changedH < pnet_size || changedW < pnet_size)
                    continue;
                float cur_scale_x = (float)width / changedW;
                float cur_scale_y = (float)height / changedH;
                int count = 0;
                //score p
                int scoreH = mapH[i];
                int scoreW = mapW[i];
                const float *p = &maps[i][0];
                // Small maps are thresholded and NMS'd in one piece; large
                // maps fall through to the tiled path below.
                if (scoreW <= block_size && scoreH < block_size)
                {
                    ZQ_CNN_BBox bbox;
                    ZQ_CNN_OrderScore order;
                    for (int row = 0; row < scoreH; row++)
                    {
                        for (int col = 0; col < scoreW; col++)
                        {
                            if (*p > thresh[0])
                            {
                                bbox.score = *p;
                                order.score = *p;
                                order.oriOrder = count;
                                // Window position in this level's pixel space.
                                bbox.row1 = stride*row;
                                bbox.col1 = stride*col;
                                bbox.row2 = stride*row + cellsize;
                                bbox.col2 = stride*col + cellsize;
                                bbox.exist
= true; bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size) && (col >= border_size && col < scoreW - border_size); bounding_boxes[i].push_back(bbox); bounding_scores[i].push_back(order); count++; } p++; } } int before_count = bounding_boxes[i].size(); _nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count); int after_count = bounding_boxes[i].size(); for (int j = 0; j < after_count; j++) { ZQ_CNN_BBox& bbox = bounding_boxes[i][j]; bbox.row1 = round(bbox.row1 *cur_scale_y); bbox.col1 = round(bbox.col1 *cur_scale_x); bbox.row2 = round(bbox.row2 *cur_scale_y); bbox.col2 = round(bbox.col2 *cur_scale_x); bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1); } double t14 = omp_get_wtime(); if (show_debug_info) printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count); } else { int before_count = 0, after_count = 0; int block_H_num = __max(1, scoreH / block_size); int block_W_num = __max(1, scoreW / block_size); int block_num = block_H_num*block_W_num; int width_per_block = scoreW / block_W_num; int height_per_block = scoreH / block_H_num; std::vector<std::vector<ZQ_CNN_BBox> > tmp_bounding_boxes(block_num); std::vector<std::vector<ZQ_CNN_OrderScore> > tmp_bounding_scores(block_num); std::vector<int> block_start_w(block_num), block_end_w(block_num); std::vector<int> block_start_h(block_num), block_end_h(block_num); for (int bh = 0; bh < block_H_num; bh++) { for (int bw = 0; bw < block_W_num; bw++) { int bb = bh * block_W_num + bw; block_start_w[bb] = (bw == 0) ? 0 : (bw*width_per_block - border_size); block_end_w[bb] = (bw == block_num - 1) ? scoreW : ((bw + 1)*width_per_block); block_start_h[bb] = (bh == 0) ? 0 : (bh*height_per_block - border_size); block_end_h[bb] = (bh == block_num - 1) ? 
scoreH : ((bh + 1)*height_per_block);
                        }
                    }
                    int chunk_size = 1;
                    if (thread_num <= 1)
                    {
                        // Serial tile sweep: threshold each tile and NMS it
                        // independently.
                        for (int bb = 0; bb < block_num; bb++)
                        {
                            ZQ_CNN_BBox bbox;
                            ZQ_CNN_OrderScore order;
                            int count = 0;
                            for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
                            {
                                p = &maps[i][0] + row*scoreW + block_start_w[bb];
                                for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
                                {
                                    if (*p > thresh[0])
                                    {
                                        bbox.score = *p;
                                        order.score = *p;
                                        order.oriOrder = count;
                                        bbox.row1 = stride*row;
                                        bbox.col1 = stride*col;
                                        bbox.row2 = stride*row + cellsize;
                                        bbox.col2 = stride*col + cellsize;
                                        bbox.exist = true;
                                        // Border boxes skip the overlap vote.
                                        bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
                                            && (col >= border_size && col < scoreW - border_size);
                                        bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
                                        tmp_bounding_boxes[bb].push_back(bbox);
                                        tmp_bounding_scores[bb].push_back(order);
                                        count++;
                                    }
                                    p++;
                                }
                            }
                            int tmp_before_count = tmp_bounding_boxes[bb].size();
                            _nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
                            int tmp_after_count = tmp_bounding_boxes[bb].size();
                            before_count += tmp_before_count;
                            after_count += tmp_after_count;
                        }
                    }
                    else
                    {
                        // Parallel tile sweep: tiles are independent (each
                        // writes only its own tmp_* slot), so a dynamic
                        // schedule balances uneven candidate densities.
#pragma omp parallel for schedule(dynamic, chunk_size) num_threads(thread_num)
                        for (int bb = 0; bb < block_num; bb++)
                        {
                            ZQ_CNN_BBox bbox;
                            ZQ_CNN_OrderScore order;
                            int count = 0;
                            for (int row = block_start_h[bb]; row < block_end_h[bb]; row++)
                            {
                                const float* p = &maps[i][0] + row*scoreW + block_start_w[bb];
                                for (int col = block_start_w[bb]; col < block_end_w[bb]; col++)
                                {
                                    if (*p > thresh[0])
                                    {
                                        bbox.score = *p;
                                        order.score = *p;
                                        order.oriOrder = count;
                                        bbox.row1 = stride*row;
                                        bbox.col1 = stride*col;
                                        bbox.row2 = stride*row + cellsize;
                                        bbox.col2 = stride*col + cellsize;
                                        bbox.exist = true;
                                        bbox.need_check_overlap_count = (row >= border_size && row < scoreH - border_size)
                                            && (col >= border_size && col < scoreW - border_size);
                                        bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
                                        tmp_bounding_boxes[bb].push_back(bbox);
                                        tmp_bounding_scores[bb].push_back(order);
                                        count++;
                                    }
                                    p++;
                                }
                            }
                            int tmp_before_count = tmp_bounding_boxes[bb].size();
                            _nms(tmp_bounding_boxes[bb], tmp_bounding_scores[bb], nms_thresh_per_scale, "Union", pnet_overlap_thresh_count);
                            int tmp_after_count = tmp_bounding_boxes[bb].size();
                            before_count += tmp_before_count;
                            after_count += tmp_after_count;
                        }
                    }
                    // Merge per-tile survivors back into this level's list.
                    count = 0;
                    for (int bb = 0; bb < block_num; bb++)
                    {
                        std::vector<ZQ_CNN_BBox>::iterator it = tmp_bounding_boxes[bb].begin();
                        for (; it != tmp_bounding_boxes[bb].end(); it++)
                        {
                            if ((*it).exist)
                            {
                                bounding_boxes[i].push_back(*it);
                                order.score = (*it).score;
                                order.oriOrder = count;
                                bounding_scores[i].push_back(order);
                                count++;
                            }
                        }
                    }
                    //ZQ_CNN_BBoxUtils::_nms(bounding_boxes[i], bounding_scores[i], nms_thresh_per_scale, "Union", 0);
                    after_count = bounding_boxes[i].size();
                    // Map survivors back to the original image resolution.
                    for (int j = 0; j < after_count; j++)
                    {
                        ZQ_CNN_BBox& bbox = bounding_boxes[i][j];
                        bbox.row1 = round(bbox.row1 *cur_scale_y);
                        bbox.col1 = round(bbox.col1 *cur_scale_x);
                        bbox.row2 = round(bbox.row2 *cur_scale_y);
                        bbox.col2 = round(bbox.col2 *cur_scale_x);
                        bbox.area = (bbox.row2 - bbox.row1)*(bbox.col2 - bbox.col1);
                    }
                    double t14 = omp_get_wtime();
                    if (show_debug_info)
                        printf("nms cost: %.3f ms, (%d-->%d)\n", 1000 * (t14 - t13), before_count, after_count);
                }
            }
            // Pool every level's survivors and run the cross-level NMS.
            std::vector<ZQ_CNN_OrderScore> firstOrderScore;
            int count = 0;
            for (int i = 0; i < scales.size(); i++)
            {
                std::vector<ZQ_CNN_BBox>::iterator it = bounding_boxes[i].begin();
                for (; it != bounding_boxes[i].end(); it++)
                {
                    if ((*it).exist)
                    {
                        firstBbox.push_back(*it);
                        order.score = (*it).score;
                        order.oriOrder = count;
                        firstOrderScore.push_back(order);
                        count++;
                    }
                }
            }
            //the first stage's nms
            if (count < 1) return false;
            double t15 = omp_get_wtime();
            _nms(firstBbox, firstOrderScore, nms_thresh[0], "Union", 0);
            _refine_and_square_bbox(firstBbox, width, height, true);
            double t16 = omp_get_wtime();
            if (show_debug_info)
                printf("nms cost: %.3f ms\n", 1000 * (t16 - t15));
            if
(show_debug_info)
                printf("first stage candidate count: %d\n", count);
            double t3 = omp_get_wtime();
            if (show_debug_info)
                printf("stage 1: cost %.3f ms\n", 1000 * (t3 - t2));
            return true;
        }
        // R-Net stage: crop + resize each P-Net candidate to 24x24, rescore
        // it, keep boxes above thresh[1], then NMS ("Min") + square refine.
        bool _Rnet_stage(std::vector<ZQ_CNN_BBox>& firstBbox, std::vector<ZQ_CNN_BBox>& secondBbox)
        {
            double t3 = omp_get_wtime();
            secondBbox.clear();
            std::vector<ZQ_CNN_BBox>::iterator it = firstBbox.begin();
            std::vector<ZQ_CNN_OrderScore> secondScore;
            std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
            int r_count = 0;
            // Collect crop rectangles; discard boxes much smaller than the
            // configured minimum face size (the clamp to the image bounds is
            // intentionally disabled -- _roi/copy_cut_border handle the crop).
            for (; it != firstBbox.end(); it++)
            {
                if ((*it).exist)
                {
                    int off_x = it->col1;
                    int off_y = it->row1;
                    int rect_w = it->col2 - off_x;
                    int rect_h = it->row2 - off_y;
                    if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height
                        ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
                    {
                        (*it).exist = false;
                        continue;
                    }
                    else
                    {
                        src_off_x.push_back(off_x);
                        src_off_y.push_back(off_y);
                        src_rect_w.push_back(rect_w);
                        src_rect_h.push_back(rect_h);
                        r_count++;
                        secondBbox.push_back(*it);
                    }
                }
            }
            secondBbox.resize(r_count);
            if (thread_num <= 1)
            {
                for (int pp = 0; pp < r_count; pp++)
                {
                    ncnn::Mat task_rnet_images;
                    ncnn::Mat tempIm;
                    // Crop the candidate region, then resize to R-Net's
                    // 24x24 input.
                    copy_cut_border(input, tempIm, src_off_y[pp], input.h - src_off_y[pp] - src_rect_h[pp],
                        src_off_x[pp], input.w - src_off_x[pp] - src_rect_w[pp]);
                    resize_bilinear(tempIm, task_rnet_images, 24, 24);
                    ncnn::Extractor ex = rnet[0].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[0]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[0]);
                    ex.set_num_threads(1);
                    ex.input("data", task_rnet_images);
                    ncnn::Mat score, bbox, keyPoint;
                    ex.extract("prob1", score);
                    ex.extract("conv5-2", bbox);
                    // score[1] is the face probability; bbox holds the four
                    // regression offsets.
                    if ((float)score[1] > thresh[1])
                    {
                        for (int j = 0; j < 4; j++)
                            secondBbox[pp].regreCoord[j] = (float)bbox[j];
                        secondBbox[pp].area = src_rect_w[pp] * src_rect_h[pp];
                        secondBbox[pp].score = (float)score[1];
                    }
                    else
                    {
                        secondBbox[pp].exist = false;
                    }
                }
            }
            else
            {
                // Candidates are independent: each iteration touches only
                // its own secondBbox[pp] slot and per-thread net/allocators.
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
                for (int pp = 0; pp < r_count; pp++)
                {
                    int thread_id = omp_get_thread_num();
                    ncnn::Mat task_rnet_images;
                    ncnn::Mat tempIm;
                    copy_cut_border(input, tempIm, src_off_y[pp], input.h - src_off_y[pp] - src_rect_h[pp],
                        src_off_x[pp], input.w - src_off_x[pp] - src_rect_w[pp]);
                    resize_bilinear(tempIm, task_rnet_images, 24, 24);
                    ncnn::Extractor ex = rnet[thread_id].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[thread_id]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[thread_id]);
                    ex.set_num_threads(1);
                    ex.input("data", task_rnet_images);
                    ncnn::Mat score, bbox, keyPoint;
                    ex.extract("prob1", score);
                    ex.extract("conv5-2", bbox);
                    if ((float)score[1] > thresh[1])
                    {
                        for (int j = 0; j < 4; j++)
                            secondBbox[pp].regreCoord[j] = (float)bbox[j];
                        secondBbox[pp].area = src_rect_w[pp] * src_rect_h[pp];
                        secondBbox[pp].score = (float)score[1];
                    }
                    else
                    {
                        secondBbox[pp].exist = false;
                    }
                }
            }
            // Compact, rebuild the score list, then NMS + square refine.
            for (int i = secondBbox.size() - 1; i >= 0; i--)
            {
                if (!secondBbox[i].exist)
                    secondBbox.erase(secondBbox.begin() + i);
            }
            int count = secondBbox.size();
            secondScore.resize(count);
            for (int i = 0; i < count; i++)
            {
                secondScore[i].score = secondBbox[i].score;
                secondScore[i].oriOrder = i;
            }
            //_nms(secondBbox, secondScore, nms_thresh[1], "Union");
            _nms(secondBbox, secondScore, nms_thresh[1], "Min");
            _refine_and_square_bbox(secondBbox, width, height, true);
            count = secondBbox.size();
            double t4 = omp_get_wtime();
            if (show_debug_info)
                printf("run Rnet [%d] times, candidate after nms: %d \n", r_count, count);
            if (show_debug_info)
                printf("stage 2: cost %.3f ms\n", 1000 * (t4 - t3));
            return true;
        }
        // O-Net stage: crop + resize each R-Net survivor to 48x48, rescore,
        // keep boxes above thresh[2], then NMS ("Min") + square refine.
        bool _Onet_stage(std::vector<ZQ_CNN_BBox>& secondBbox, std::vector<ZQ_CNN_BBox>& thirdBbox)
        {
            double t3 = omp_get_wtime();
            thirdBbox.clear();
            std::vector<ZQ_CNN_BBox>::iterator it = secondBbox.begin();
            std::vector<ZQ_CNN_OrderScore> thirdScore;
            std::vector<int> src_off_x, src_off_y, src_rect_w, src_rect_h;
            int o_count = 0;
            for (; it != secondBbox.end(); it++)
            {
                if ((*it).exist)
                {
                    int off_x = it->col1;
                    int off_y = it->row1;
                    int rect_w = it->col2 - off_x;
                    int rect_h = it->row2 - off_y;
                    if (/*off_x < 0 || off_x + rect_w > width || off_y < 0 || off_y + rect_h > height
                        ||*/ rect_w <= 0.5*min_size || rect_h <= 0.5*min_size)
                    {
                        (*it).exist = false;
                        continue;
                    }
                    else
                    {
                        src_off_x.push_back(off_x);
                        src_off_y.push_back(off_y);
                        src_rect_w.push_back(rect_w);
                        src_rect_h.push_back(rect_h);
                        o_count++;
                        thirdBbox.push_back(*it);
                    }
                }
            }
            thirdBbox.resize(o_count);
            if (thread_num <= 1)
            {
                for (int pp = 0; pp < o_count; pp++)
                {
                    ncnn::Mat task_onet_images;
                    ncnn::Mat tempIm;
                    copy_cut_border(input, tempIm, src_off_y[pp], input.h - src_off_y[pp] - src_rect_h[pp],
                        src_off_x[pp], input.w - src_off_x[pp] - src_rect_w[pp]);
                    resize_bilinear(tempIm, task_onet_images, 48, 48);
                    ncnn::Extractor ex = onet[0].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[0]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[0]);
                    ex.set_num_threads(1);
                    ex.input("data", task_onet_images);
                    ncnn::Mat score, bbox, keyPoint;
                    ex.extract("prob1", score);
                    ex.extract("conv6-2", bbox);
                    if ((float)score[1] > thresh[2])
                    {
                        for (int j = 0; j < 4; j++)
                            thirdBbox[pp].regreCoord[j] = (float)bbox[j];
                        thirdBbox[pp].area = src_rect_w[pp] * src_rect_h[pp];
                        thirdBbox[pp].score = (float)score[1];
                    }
                    else
                    {
                        thirdBbox[pp].exist = false;
                    }
                }
            }
            else
            {
#pragma omp parallel for num_threads(thread_num) schedule(dynamic,1)
                for (int pp = 0; pp < o_count; pp++)
                {
                    int thread_id = omp_get_thread_num();
                    ncnn::Mat task_onet_images;
                    ncnn::Mat tempIm;
                    copy_cut_border(input, tempIm, src_off_y[pp], input.h - src_off_y[pp] - src_rect_h[pp],
                        src_off_x[pp], input.w - src_off_x[pp] - src_rect_w[pp]);
                    resize_bilinear(tempIm, task_onet_images, 48, 48);
                    ncnn::Extractor ex = onet[thread_id].create_extractor();
                    ex.set_light_mode(true);
                    ex.set_blob_allocator(&g_blob_pool_allocator[thread_id]);
                    ex.set_workspace_allocator(&g_workspace_pool_allocator[thread_id]);
ex.set_num_threads(1); ex.input("data", task_onet_images); ncnn::Mat score, bbox, keyPoint; ex.extract("prob1", score); ex.extract("conv6-2", bbox); if ((float)score[1] > thresh[2]) { for (int j = 0; j < 4; j++) thirdBbox[pp].regreCoord[j] = (float)bbox[j]; thirdBbox[pp].area = src_rect_w[pp] * src_rect_h[pp]; thirdBbox[pp].score = (float)score[1]; } else { thirdBbox[pp].exist = false; } } } for (int i = thirdBbox.size() - 1; i >= 0; i--) { if (!thirdBbox[i].exist) thirdBbox.erase(thirdBbox.begin() + i); } int count = thirdBbox.size(); thirdScore.resize(count); for (int i = 0; i < count; i++) { thirdScore[i].score = thirdScore[i].score; thirdScore[i].oriOrder = i; } _nms(thirdBbox, thirdScore, nms_thresh[2], "Min"); _refine_and_square_bbox(thirdBbox, width, height, true); count = thirdBbox.size(); double t4 = omp_get_wtime(); if (show_debug_info) printf("run Onet [%d] times, candidate after nms: %d \n", o_count, count); if (show_debug_info) printf("stage 3: cost %.3f ms\n", 1000 * (t4 - t3)); return true; } void _select(std::vector<ZQ_CNN_BBox>& bbox, int limit_num, int width, int height) { int in_num = bbox.size(); if (limit_num >= in_num) return; bbox.resize(limit_num); } }; } #endif
GB_binop__lxor_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_08__lxor_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_04__lxor_uint64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_uint64)
// A*D function (colscale):         GB (_AxD__lxor_uint64)
// D*A function (rowscale):         GB (_DxB__lxor_uint64)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_uint64)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_uint64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_uint64)
// C=scalar+B                       GB (_bind1st__lxor_uint64)
// C=scalar+B'                      GB (_bind1st_tran__lxor_uint64)
// C=A+scalar                       GB (_bind2nd__lxor_uint64)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_uint64)

// C type:     uint64_t
// A type:     uint64_t
// A pattern?  0
// B type:     uint64_t
// B pattern?  0

// BinaryOp:   cij = ((aij != 0) != (bij != 0))
// (LXOR on uint64: both operands are reduced to booleans via "!= 0" first)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_UINT64 || GxB_NO_LXOR_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LXOR is none of those ops, so this dense-accum ewise3 kernel is compiled out.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // kernel body comes from the included template
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator disabled at compile time: fall back to the generic case
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // the alpha/beta scalars are used only for eWiseUnion
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lxor_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // LXOR is commutative (GB_BINOP_FLIP is 0), so only this branch compiles.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lxor_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t   x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t   y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) != (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__lxor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== Efficient_RANSAC.h ===== */
// Copyright (c) 2015 INRIA Sophia-Antipolis (France). // All rights reserved. // // This file is part of CGAL (www.cgal.org). // // $URL$ // $Id$ // SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial // // // Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez // #ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H #include <CGAL/license/Shape_detection.h> #include <CGAL/Random.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h> #include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h> // for octree ------------------------------ #include <boost/iterator/filter_iterator.hpp> #include <CGAL/bounding_box.h> #include <CGAL/Iterator_range.h> //---------- #include <vector> #include <cmath> #include <limits> #include <fstream> #include <sstream> #include <functional> // boost -------------- #include <CGAL/boost/iterator/counting_iterator.hpp> #include <boost/shared_ptr.hpp> #include <boost/make_shared.hpp> //--------------------- namespace CGAL { namespace Shape_detection { /*! \ingroup PkgShapeDetectionRANSAC \brief Shape detection algorithm based on the RANSAC method. Given a point set in 3D space with unoriented normals, sampled on surfaces, this class enables to detect subsets of connected points lying on the surface of primitive shapes. Each input point is assigned to either none or at most one detected primitive shape. The implementation follows \cgalCite{schnabel2007efficient}. \tparam Traits must be a model of `EfficientRANSACTraits`. 
*/
template <class Traits>
class Efficient_RANSAC {
public:

  /// \cond SKIP_IN_MANUAL
  // Predicate used by Point_index_iterator: a point index passes the filter
  // when it has not been assigned to any shape (shape index -1).
  struct Filter_unassigned_points {
    // NOTE(review): the default constructor binds the reference member
    // m_shape_index to this instance's own `dummy` vector; the compiler-
    // generated copy constructor will make the copy's reference point at the
    // ORIGINAL instance's vector — safe only while that instance outlives the
    // copy. Confirm against boost::filter_iterator's predicate copying.
    Filter_unassigned_points() : m_shape_index(dummy) {}
    Filter_unassigned_points(const std::vector<int> &shapeIndex)
      : m_shape_index(shapeIndex) {}

    bool operator()(std::size_t x) {
      if (x < m_shape_index.size())
        return m_shape_index[x] == -1;
      else
        return true; // to prevent infinite incrementing
    }
    const std::vector<int>& m_shape_index;
    std::vector<int> dummy;
  };

  typedef boost::filter_iterator<Filter_unassigned_points,
    boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> >
  Point_index_iterator;
  ///< iterator for indices of points.
  /// \endcond

  /// \name Types
  /// @{
  /// \cond SKIP_IN_MANUAL
  typedef typename Traits::Input_range::iterator Input_iterator;
  typedef typename Traits::FT FT; ///< number type.
  typedef typename Traits::Point_3 Point; ///< point type.
  typedef typename Traits::Vector_3 Vector; ///< vector type.
  /// \endcond

  typedef typename Traits::Input_range Input_range;
  ///< Model of the concept `Range` with random access iterators, providing input points and normals
  /// through the following two property maps.

  typedef typename Traits::Point_map Point_map;
  ///< Property map to access the location of an input point.
  typedef typename Traits::Normal_map Normal_map;
  ///< Property map to access the unoriented normal of an input point.
  typedef Shape_base<Traits> Shape; ///< Shape type.
  typedef Plane<Traits> Plane_shape; ///< %Plane shape type.

#ifdef DOXYGEN_RUNNING
  typedef unspecified_type Shape_range;
  ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`.
  typedef unspecified_type Plane_range;
  ///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`.
#else
  // Range over the extracted shapes; holds a shared_ptr to the shape vector
  // so the range stays valid even after the detector is cleared.
  struct Shape_range : public Iterator_range<
    typename std::vector<boost::shared_ptr<Shape> >::const_iterator> {
    typedef Iterator_range<
      typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base;

    Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
      extracted_shapes) : Base(make_range(extracted_shapes->begin(),
        extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}

  private:
    boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
      m_extracted_shapes; // keeps a reference to the shape vector
  };

  // Same as Shape_range, restricted to planes.
  struct Plane_range : public Iterator_range<
    typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> {
    typedef Iterator_range<
      typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base;

    Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
      extracted_shapes) : Base(make_range(extracted_shapes->begin(),
        extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}

  private:
    boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
      m_extracted_shapes; // keeps a reference to the shape vector
  };
#endif

#ifdef DOXYGEN_RUNNING
  typedef unspecified_type Point_index_range;
  ///< `Iterator_range` with a bidirectional iterator with value type `std::size_t`
  ///  as indices into the input data that has not been assigned to a shape.
  ///  As this range class has no `size()` method, the method
  ///  `Efficient_RANSAC::number_of_unassigned_points()` is provided.
#else
  typedef Iterator_range<Point_index_iterator> Point_index_range;
#endif

  /// @}

  /// \name Parameters
  /// @{
  /*!
    Parameters for the shape detection algorithm. They are explained in detail
    in Section \ref Shape_detection_RANSACParameters of the User Manual.
  */
  struct Parameters {
    Parameters()
      : probability((FT) 0.01)
      , min_points((std::numeric_limits<std::size_t>::max)())
      , epsilon(-1)
      , normal_threshold((FT) 0.9)
      , cluster_epsilon(-1)
    {}

    /*!
      Probability to control search endurance. %Default value is 0.01 as set
      by the constructor above (NOTE(review): earlier documentation stated
      0.05 — confirm the intended default).
      A lower probability provides a higher reliability and determinism at the cost
      of longer running time due to a higher search endurance.
      It must belong to the interval [0, 1].
    */
    FT probability;

    /*!
      Minimum number of points in a shape. %Default value is 1% of total number
      of input points. It must belong to the interval [0, +inf).
    */
    std::size_t min_points;

    /*!
      Maximum acceptable Euclidean distance between a point and a shape.
      %Default value is 1% of the bounding box diagonal.
      It must belong to the interval [0, +inf).
    */
    FT epsilon;

    /*!
      Maximum threshold on the dot product between the estimated shape's normal
      and the point's normal, that is the cosine of the angle (cos(25°) = 0.9).
      %Default value is 0.9 (around 25 degrees).
      It must belong to the interval [0, 1].
    */
    FT normal_threshold;

    /*!
      Maximum acceptable Euclidean distance between points, which are assumed to
      be neighbors. %Default value is 1% of the bounding box diagonal.
      It must belong to the interval [0, +inf).
    */
    FT cluster_epsilon;
  };
  /// @}

private:
  typedef internal::Octree<internal::DirectPointAccessor<Traits> >
    Direct_octree;
  typedef internal::Octree<internal::IndexedPointAccessor<Traits> >
    Indexed_octree;
  //--------------------------------------------typedef

  // Creates a function pointer for instancing shape instances.
  template <class ShapeT>
  static Shape *factory() {
    return new ShapeT;
  }

public:
  /// \name Initialization
  /// @{

  /*!
    Constructs an empty shape detection object.
  */
  Efficient_RANSAC(Traits t = Traits())
    : m_traits(t)
    , m_direct_octrees(nullptr)
    , m_global_octree(nullptr)
    , m_num_subsets(0)
    , m_num_available_points(0)
    , m_num_total_points(0)
    , m_valid_iterators(false)
  {}

  /*!
    Releases all memory allocated by this instance including shapes.
  */
  ~Efficient_RANSAC() {
    clear();
  }

  /*!
    Retrieves the traits class.
  */
  const Traits& traits() const {
    return m_traits;
  }

  /*!
    Retrieves the point property map.
  */
  const Point_map& point_map() const { return m_point_pmap; }

  /*!
    Retrieves the normal property map.
  */
  const Normal_map& normal() const { return m_normal_pmap; }

  // Accessors for the raw input iterator range (set by set_input()).
  Input_iterator input_iterator_first() const {
    return m_input_iterator_first;
  }

  Input_iterator input_iterator_beyond() const {
    return m_input_iterator_beyond;
  }

  /*!
    Sets the input data. The range must stay valid
    until the detection has been performed and the access to the
    results is no longer required. The data in the input is reordered by the
    methods `detect()` and `preprocess()`. This function first calls `clear()`.
  */
  void set_input(
    Input_range& input_range, ///< Range of input data.
    Point_map point_map = Point_map(), ///< Property map to access the position of an input point.
    Normal_map normal_map = Normal_map() ///< Property map to access the normal of an input point.
    ) {
    m_point_pmap = point_map;
    m_normal_pmap = normal_map;

    m_input_iterator_first = input_range.begin();
    m_input_iterator_beyond = input_range.end();

    clear();

    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();

    m_num_available_points = m_num_total_points = std::distance(
      m_input_iterator_first, m_input_iterator_beyond);

    m_valid_iterators = true;
  }

  /*!
    Registers the shape type `ShapeType` in the detection engine that must inherit
    from `Shape_base`. For example, for registering a plane as detectable shape, you should call
    `ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note
    that if your call is within a template, you should add the
    `template` keyword just before `add_shape_factory`:
    `ransac.template add_shape_factory<
    Shape_detection::Plane<Traits> >();`.
  */
  template <class Shape_type>
  void add_shape_factory() {
    m_shape_factories.push_back(factory<Shape_type>);
  }

  /*!
    Constructs internal data structures required for the shape detection.
    These structures only depend on the input data, i.e. the points and
    normal vectors. This method is called by `detect()`, if it was not called
    before by the user.
  */
  bool preprocess() {
    if (m_num_total_points == 0)
      return false;

    // Generation of subsets: the subset count grows with log2 of the number
    // of input points, with a minimum of 2 subsets.
    m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t)
      std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2);

    // SUBSET GENERATION ->
    // approach with increasing subset sizes -> replace with octree later on
    Input_iterator last = m_input_iterator_beyond - 1;
    std::size_t remainingPoints = m_num_total_points;

    m_available_octree_sizes.resize(m_num_subsets);
    m_direct_octrees = new Direct_octree *[m_num_subsets];
    // Build the subsets from largest index to 0, halving the candidate pool
    // each time; subset 0 receives all points that remain.
    for (int s = int(m_num_subsets) - 1;s >= 0;--s) {
      std::size_t subsetSize = remainingPoints;
      std::vector<std::size_t> indices(subsetSize);
      if (s) {
        subsetSize >>= 1;
        for (std::size_t i = 0;i<subsetSize;i++) {
          std::size_t index = get_default_random()(2);
          index = index + (i<<1);
          index = (index >= remainingPoints) ? remainingPoints - 1 : index;
          indices[i] = index;
        }

        // move points to the end of the point vector
        std::size_t j = subsetSize;
        do {
          j--;
          typename std::iterator_traits<Input_iterator>::value_type
            tmp = (*last);
          *last = m_input_iterator_first[indices[std::size_t(j)]];
          m_input_iterator_first[indices[std::size_t(j)]] = tmp;
          last--;
        } while (j > 0);
        m_direct_octrees[s] = new Direct_octree(
          m_traits, last + 1,
          last + subsetSize + 1,
          m_point_pmap, m_normal_pmap,
          remainingPoints - subsetSize);
      }
      else
        m_direct_octrees[0] = new Direct_octree(
          m_traits, m_input_iterator_first,
          m_input_iterator_first + (subsetSize),
          m_point_pmap, m_normal_pmap,
          0);

      m_available_octree_sizes[s] = subsetSize;
      // NOTE(review): uses m_options.cluster_epsilon, which is -1 until
      // detect() derives the default — confirm behavior when preprocess()
      // is called directly by the user.
      m_direct_octrees[s]->createTree(m_options.cluster_epsilon);

      remainingPoints -= subsetSize;
    }

    m_global_octree = new Indexed_octree(
      m_traits, m_input_iterator_first, m_input_iterator_beyond,
      m_point_pmap, m_normal_pmap);
    m_global_octree->createTree(m_options.cluster_epsilon);

    return true;
  }

  /// @}

  /// \name Memory Management
  /// @{
  /*!
    Removes all shape types registered for detection.
  */
  void clear_shape_factories() {
    m_shape_factories.clear();
  }

  /*!
    Frees memory allocated for the internal search structures but keeps the
    detected shapes. It invalidates the range retrieved using
    `unassigned_points()`.
  */
  void clear_octrees() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    if (m_global_octree) {
      delete m_global_octree;
      m_global_octree = nullptr;
    }

    if (m_direct_octrees) {
      for (std::size_t i = 0;i<m_num_subsets;i++)
        delete m_direct_octrees[i];

      delete [] m_direct_octrees;
      m_direct_octrees = nullptr;
    }

    m_num_subsets = 0;
  }

  /*!
    Calls `clear_octrees()` and removes all detected shapes.
    All internal structures are cleaned, including formerly detected shapes.
    Thus iterators and ranges retrieved through `shapes()`, `planes()` and
    `indices_of_unassigned_points()` are invalidated.
  */
  void clear() {
    // If there is no data yet, there are no data structures.
    if (!m_valid_iterators)
      return;

    std::vector<int>().swap(m_shape_index);

    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();

    m_num_available_points = m_num_total_points;

    clear_octrees();
    clear_shape_factories();
  }

  /// @}

  /// \name Detection
  /// @{

  /*!
    Performs the shape detection. Shape types considered during
    the detection are those registered using `add_shape_factory()`.

    \param options parameters for shape detection

    \param callback can be omitted if the algorithm should be run
    without any callback. It is called regularly when the algorithm
    is running: the current advancement (between 0.0 and 1.0) is
    passed as parameter. If it returns `true`, then the algorithm
    continues its execution normally; if it returns `false`, the
    algorithm is stopped. Note that this interruption may leave the
    class in an invalid state.

    \return `true` if shape types have been registered and
            input data has been set. Otherwise, `false` is returned.
  */
  bool detect(const Parameters &options = Parameters(),
              const std::function<bool(double)>& callback
              = std::function<bool(double)>()) {

    m_options = options;

    // No shape types for detection or no points provided, exit
    if (m_shape_factories.size() == 0 ||
        (m_input_iterator_beyond - m_input_iterator_first) == 0)
      return false;

    if (m_num_subsets == 0 || m_global_octree == 0) {
      if (!preprocess())
        return false;
    }

    if (callback && !callback(0.))
      return false;

    // Reset data structures possibly used by former search
    m_extracted_shapes =
      boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
    m_num_available_points = m_num_total_points;

    for (std::size_t i = 0;i<m_num_subsets;i++) {
      m_available_octree_sizes[i] = m_direct_octrees[i]->size();
    }

    // Use bounding box diagonal as reference for default values
    Bbox_3 bbox = m_global_octree->boundingBox();
    FT bbox_diagonal = (FT) CGAL::sqrt(
      (bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
      + (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
      + (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));

    // Epsilon or cluster_epsilon have been set by the user?
    // If not, derive from bounding box diagonal
    m_options.epsilon = (m_options.epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.epsilon;

    m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
      ? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;

    // Minimum number of points has been set?
    m_options.min_points =
      (m_options.min_points == (std::numeric_limits<std::size_t>::max)()) ?
      (std::size_t)((FT)0.01 * m_num_available_points) :
      m_options.min_points;
    // never accept shapes supported by fewer than 10 points
    m_options.min_points = (m_options.min_points < 10)
      ? 10 : m_options.min_points;

    // Initializing the shape index
    m_shape_index.assign(m_num_available_points, -1);

    if (m_options.min_points > m_num_available_points)
      return true;

    // List of all randomly drawn candidates
    // with the minimum number of points
    std::vector<Shape *> candidates;

    // Identifying minimum number of samples
    m_required_samples = 0;
    for (std::size_t i = 0;i<m_shape_factories.size();i++) {
      Shape *tmp = (Shape *) m_shape_factories[i]();
      m_required_samples = (std::max<std::size_t>)(m_required_samples,
                                                   tmp->minimum_sample_size());
      delete tmp;
    }

    std::size_t first_sample; // first sample for RANSAC

    FT best_expected = 0;

    // number of points that have been assigned to a shape
    std::size_t num_invalid = 0;

    std::size_t generated_candidates = 0;
    std::size_t failed_candidates = 0;
    std::size_t limit_failed_candidates =
      (std::max)(std::size_t(10000),
                 std::size_t(m_input_iterator_beyond
                             - m_input_iterator_first) / std::size_t(100));

    bool force_exit = false;
    bool keep_searching = true;

    do { // main loop
      best_expected = 0;

      if (keep_searching)
        // Search (remaining_points / min_points) shapes
        // (max 200 per iteration, min 1)
        do {
          std::size_t search_number =
            (std::min)(std::size_t(200),
                       (std::max)(std::size_t((m_num_available_points - num_invalid)
                                              / double(m_options.min_points)),
                                  std::size_t(1)));
          for (std::size_t nb = 0; nb < search_number; ++ nb) {
            // Generate candidates
            // 1. pick a point p1 randomly among available points
            std::set<std::size_t> indices;
            bool done = false;
            do {
              do
                first_sample = get_default_random()(
                  static_cast<unsigned int>(m_num_available_points));
              while (m_shape_index[first_sample] != -1);

              // draw the remaining samples from the octree cell containing p1
              done = m_global_octree->drawSamplesFromCellContainingPoint(
                get(m_point_pmap, *(m_input_iterator_first + first_sample)),
                select_random_octree_level(),
                indices,
                m_shape_index,
                m_required_samples);

              if (callback && !callback(num_invalid
                                        / double(m_num_total_points)))
                return false;
            } while (m_shape_index[first_sample] != -1 || !done);

            generated_candidates++;

            // add candidate for each type of primitives
            for(typename std::vector<Shape *(*)()>::iterator it =
                m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
              if (callback && !callback(num_invalid
                                        / double(m_num_total_points)))
                return false;
              Shape *p = (Shape *) (*it)();
              // compute the primitive and says if the candidate is valid
              p->compute(indices,
                         m_input_iterator_first, m_traits,
                         m_point_pmap, m_normal_pmap,
                         m_options.epsilon, m_options.normal_threshold);

              if (p->is_valid()) {
                improve_bound(p, m_num_available_points - num_invalid, 1, 500);

                // evaluate the candidate
                if(p->max_bound() >= m_options.min_points && p->score() > 0) {
                  if (best_expected < p->expected_value())
                    best_expected = p->expected_value();
                  candidates.push_back(p);
                }
                else {
                  failed_candidates++;
                  delete p;
                }
              }
              else {
                failed_candidates++;
                delete p;
              }
            }
          }

          if (failed_candidates >= limit_failed_candidates) {
            force_exit = true;
          }

          keep_searching = (stop_probability(m_options.min_points,
            m_num_available_points - num_invalid,
            generated_candidates, m_global_octree->maxLevel())
              > m_options.probability);
        } while( !force_exit
          && stop_probability((std::size_t) best_expected,
                              m_num_available_points - num_invalid,
                              generated_candidates,
                              m_global_octree->maxLevel())
               > m_options.probability
          && keep_searching);
      // end of generate candidate

      if (force_exit) {
        break;
      }

      if (candidates.empty())
        continue;

      // Now get the best candidate in the current set of all candidates
      // Note that the function sorts the candidates:
      //  the best candidate is always the last element of the vector
      Shape *best_candidate =
        get_best_candidate(candidates, m_num_available_points - num_invalid);

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      // If search is done and the best candidate is too small, we are done.
      // NOTE(review): best_candidate is dereferenced here, but only checked
      // against nullptr two lines below — confirm get_best_candidate() can
      // never return nullptr when candidates is non-empty.
      if (!keep_searching && best_candidate->m_score < m_options.min_points)
        break;

      if (!best_candidate)
        continue;

      // Re-score the winner against the full global octree.
      best_candidate->m_indices.clear();
      best_candidate->m_score =
        m_global_octree->score(best_candidate,
                               m_shape_index,
                               FT(3) * m_options.epsilon,
                               m_options.normal_threshold);

      best_expected = static_cast<FT>(best_candidate->m_score);

      best_candidate->connected_component(best_candidate->m_indices,
                                          m_options.cluster_epsilon);

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      // check score against min_points and clear out candidates if too low
      if (best_candidate->indices_of_assigned_points().size() <
          m_options.min_points) {
        if (!(best_candidate->indices_of_assigned_points().empty()))
          // remove duplicates of the rejected candidate
          for (std::size_t i = 0;i < candidates.size() - 1;i++) {
            if (best_candidate->is_same(candidates[i])) {
              delete candidates[i];
              candidates[i] = nullptr;
            }
          }
        candidates.back() = nullptr;
        delete best_candidate;
        best_candidate = nullptr;

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // Trimming candidates list (compact out the nullptr slots)
        std::size_t empty = 0, occupied = 0;
        while (empty < candidates.size()) {
          while (empty < candidates.size() && candidates[empty]) empty++;

          if (empty >= candidates.size())
            break;

          if (occupied < empty)
            occupied = empty + 1;

          while (occupied < candidates.size() && !candidates[occupied])
            occupied++;

          if (occupied >= candidates.size())
            break;

          candidates[empty] = candidates[occupied];
          candidates[occupied] = nullptr;
          empty++;
          occupied++;
        }

        candidates.resize(empty);

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;
      }
      else if (stop_probability((std::size_t) best_candidate->expected_value(),
                                (m_num_available_points - num_invalid),
                                generated_candidates,
                                m_global_octree->maxLevel())
               <= m_options.probability) {

        // Remove candidate from list
        candidates.back() = nullptr;

        // 1. add best candidate to final result.
        m_extracted_shapes->push_back(
          boost::shared_ptr<Shape>(best_candidate));

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // 2. remove the points
        const std::vector<std::size_t> &indices_points_best_candidate =
          best_candidate->indices_of_assigned_points();

        // update generated candidates to reflect removal of points
        generated_candidates =
          std::size_t(std::pow (1.f - (indices_points_best_candidate.size() /
                                       float(m_num_available_points - num_invalid)), 3.f)
                      * generated_candidates);

        // 2.3 Remove the points from the subtrees
        for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) {
          m_shape_index[indices_points_best_candidate.at(i)] =
            int(m_extracted_shapes->size()) - 1;

          num_invalid++;

          for (std::size_t j = 0;j<m_num_subsets;j++) {
            if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
              std::size_t offset = m_direct_octrees[j]->offset();
              if (offset <= indices_points_best_candidate.at(i) &&
                  (indices_points_best_candidate.at(i) - offset)
                  < m_direct_octrees[j]->size()) {
                m_available_octree_sizes[j]--;
              }
            }
          }
        }

        failed_candidates = 0;
        best_expected = 0;

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // cumulative subset sizes used by compute_bound below
        std::vector<std::size_t> subset_sizes(m_num_subsets);
        subset_sizes[0] = m_available_octree_sizes[0];
        for (std::size_t i = 1;i<m_num_subsets;i++) {
          subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
        }

        // 3. Remove points from candidates common with extracted primitive
        //#pragma omp parallel for
        best_expected = 0;
        for (std::size_t i=0;i< candidates.size()-1;i++) {
          if (candidates[i]) {
            candidates[i]->update_points(m_shape_index);
            candidates[i]->compute_bound(
              subset_sizes[candidates[i]->m_nb_subset_used - 1],
              m_num_available_points - num_invalid);

            if (candidates[i]->max_bound() < m_options.min_points) {
              delete candidates[i];
              candidates[i] = nullptr;
            }
            else {
              best_expected = (candidates[i]->expected_value() > best_expected)
                ? candidates[i]->expected_value() : best_expected;
            }
          }
        }

        if (callback && !callback(num_invalid / double(m_num_total_points)))
          return false;

        // two-pointer compaction of the candidate list
        std::size_t start = 0, end = candidates.size() - 1;
        while (start < end) {
          while (candidates[start] && start < end) start++;
          while (!candidates[end] && start < end) end--;

          if (!candidates[start] && candidates[end] && start < end) {
            candidates[start] = candidates[end];
            candidates[end] = nullptr;
            start++;
            end--;
          }
        }

        if (candidates[end]) end++;

        candidates.resize(end);
      }
      else if (!keep_searching)
        ++ generated_candidates;

      if (callback && !callback(num_invalid / double(m_num_total_points)))
        return false;

      keep_searching = (stop_probability(m_options.min_points,
        m_num_available_points - num_invalid,
        generated_candidates, m_global_octree->maxLevel())
          > m_options.probability);
    } while((keep_searching
      && FT(m_num_available_points - num_invalid) >= m_options.min_points)
      || best_expected >= m_options.min_points);

    // Clean up remaining candidates.
    for (std::size_t i = 0;i<candidates.size();i++)
      delete candidates[i];

    candidates.resize(0);

    m_num_available_points -= num_invalid;

    return true;
  }

  /// @}

  /// \name Access
  /// @{
  /*!
    Returns an `Iterator_range` with a bidirectional iterator with
    value type `boost::shared_ptr<Shape>` over the detected shapes
    in the order of detection.
    Depending on the chosen probability
    for the detection, the shapes are ordered
    with decreasing size.
*/
  Shape_range shapes() const {
    return Shape_range(m_extracted_shapes);
  }

  /*!
    Returns an `Iterator_range` with a bidirectional iterator with
    value type `boost::shared_ptr<Plane_shape>` over only the
    detected planes in the order of detection.
    Depending on the chosen probability
    for the detection, the planes are ordered
    with decreasing size.
  */
  Plane_range planes() const {
    // Collect only those extracted shapes that down-cast to Plane_shape.
    boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes
      = boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();
    for (std::size_t i = 0; i < m_extracted_shapes->size(); ++ i) {
      boost::shared_ptr<Plane_shape> pshape
        = boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]);

      // Ignore all shapes other than plane (dynamic cast yields null).
      if (pshape != boost::shared_ptr<Plane_shape>())
        planes->push_back (pshape);
    }
    return Plane_range(planes);
  }

  /*!
    Number of points not assigned to a shape.
  */
  std::size_t number_of_unassigned_points() const {
    return m_num_available_points;
  }

  /*!
    Returns an `Iterator_range` with a bidirectional iterator with
    value type `std::size_t`
    as indices into the input data that has not been assigned to a shape.
  */
  Point_index_range indices_of_unassigned_points() {
    // Filter the full index range [0, size) down to indices whose
    // m_shape_index entry marks them as unassigned.
    Filter_unassigned_points fup(m_shape_index);

    Point_index_iterator p1 =
      boost::make_filter_iterator<Filter_unassigned_points>(
        fup,
        boost::counting_iterator<std::size_t,
                                 boost::use_default,
                                 std::ptrdiff_t>(0),
        boost::counting_iterator<std::size_t,
                                 boost::use_default,
                                 std::ptrdiff_t>(m_shape_index.size()));

    return make_range(p1, Point_index_iterator(p1.end()));
  }
  /// @}

private:
  // Picks a uniformly random octree level in [0, maxLevel()].
  int select_random_octree_level() {
    return (int) get_default_random()(
      static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
  }

  // Sorts candidates by their upper score bound and iteratively refines the
  // bounds of all candidates whose score interval still overlaps the best
  // one, until the best candidate is unambiguous.
  // NOTE: sorts `candidates`; the best candidate ends up at the back.
  Shape* get_best_candidate(std::vector<Shape* >& candidates,
                            const std::size_t num_available_points) {
    if (candidates.size() == 1)
      return candidates.back();

    int index_worse_candidate = 0;
    bool improved = true;

    while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
      improved = false;

      typename Shape::Compare_by_max_bound comp;

      std::sort(candidates.begin() + index_worse_candidate,
                candidates.end(),
                comp);

      //refine the best one
      improve_bound(candidates.back(),
                    num_available_points, m_num_subsets,
                    m_options.min_points);

      int position_stop;

      //Take all those intersecting the best one, check for equal ones
      for (position_stop = int(candidates.size()) - 1;
           position_stop > index_worse_candidate;
           position_stop--) {
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break;//the intervals do not overlaps anymore
        if (candidates.at(position_stop)->max_bound()
              <= m_options.min_points)
          break;  //the following candidate doesn't have enough points!
        //if we reach this point, there is an overlap
        //  between best one and position_stop
        //so request refining bound on position_stop
        improved |= improve_bound(candidates.at(position_stop),
                                  num_available_points,
                                  m_num_subsets,
                                  m_options.min_points);
        //test again after refined
        if (candidates.back()->min_bound() >
            candidates.at(position_stop)->max_bound())
          break;//the intervals do not overlaps anymore
      }
      index_worse_candidate = position_stop;
    }

    return candidates.back();
  }

  // Scores `candidate` against one more point subset and recomputes its
  // lower/upper score bounds. Returns false when all allowed subsets have
  // already been consumed (no refinement happened).
  bool improve_bound(Shape *candidate,
                     std::size_t num_available_points,
                     std::size_t max_subset,
                     std::size_t min_points) {
    if (candidate->m_nb_subset_used >= max_subset)
      return false;

    if (candidate->m_nb_subset_used >= m_num_subsets)
      return false;

    candidate->m_nb_subset_used =
      (candidate->m_nb_subset_used >= m_num_subsets) ?
        m_num_subsets - 1 : candidate->m_nb_subset_used;

    //what it does is add another subset and recompute lower and upper bound
    //the next subset to include is provided by m_nb_subset_used

    std::size_t num_points_evaluated = 0;
    for (std::size_t i=0;i<candidate->m_nb_subset_used;i++)
      num_points_evaluated += m_available_octree_sizes[i];

    // need score of new subset as well as sum of
    // the score of the previous considered subset
    std::size_t new_score = 0;
    std::size_t new_sampled_points = 0;

    do {
      new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
        candidate,
        m_shape_index,
        m_options.epsilon,
        m_options.normal_threshold);

      candidate->m_score += new_score;

      num_points_evaluated +=
        m_available_octree_sizes[candidate->m_nb_subset_used];
      new_sampled_points +=
        m_available_octree_sizes[candidate->m_nb_subset_used];

      candidate->m_nb_subset_used++;
    } while (new_sampled_points < min_points &&
             candidate->m_nb_subset_used < m_num_subsets);

    // NOTE(review): m_score accumulated above is immediately replaced by the
    // size of the assigned-index set — the accumulation appears redundant;
    // preserved as-is because downstream code may rely on side effects of
    // score(). Confirm before simplifying.
    candidate->m_score = candidate->m_indices.size();

    candidate->compute_bound(num_points_evaluated, num_available_points);

    return true;
  }

  // Probability that a candidate of size `largest_candidate` was missed by
  // all `num_candidates` draws, given `num_pts` remaining points, the octree
  // depth and the number of samples required per draw (RANSAC stop test).
  inline FT stop_probability(std::size_t largest_candidate,
                             std::size_t num_pts,
                             std::size_t num_candidates,
                             std::size_t octree_depth) const {
    return (std::min<FT>)((FT)std::pow(FT(1) - FT(largest_candidate)
      / (FT(num_pts) * FT(octree_depth+1)
         * FT(1 << (m_required_samples - 1))),
      int(num_candidates)), FT(1));
  }

private:
  Parameters m_options;

  // Traits class.
  Traits m_traits;

  // Octrees build on input data for quick shape evaluation and
  // sample selection within an octree cell.
  Direct_octree **m_direct_octrees;
  Indexed_octree *m_global_octree;
  std::vector<std::size_t> m_available_octree_sizes;
  std::size_t m_num_subsets;

  // maps index into points to assigned extracted primitive
  std::vector<int> m_shape_index;
  std::size_t m_num_available_points;
  std::size_t m_num_total_points;
  std::size_t m_required_samples;

  //give the index of the subset of point i
  std::vector<int> m_index_subsets;

  boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
    m_extracted_shapes;

  std::vector<Shape *(*)()> m_shape_factories;

  // iterators of input data
  bool m_valid_iterators;
  Input_iterator m_input_iterator_first, m_input_iterator_beyond;
  Point_map m_point_pmap;
  Normal_map m_normal_pmap;
};
}
}
#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
column_matrix.h
/*!
 * Copyright 2017 by Contributors
 * \file column_matrix.h
 * \brief Utility for fast column-wise access
 * \author Philip Cho
 */
#ifndef TSOOBGX_COMMON_COLUMN_MATRIX_H_
#define TSOOBGX_COMMON_COLUMN_MATRIX_H_

#include <limits>
#include <vector>
#include "hist_util.h"

namespace tsoobgx {
namespace common {

/*! \brief column type */
enum ColumnType {
  kDenseColumn,
  kSparseColumn
};

/*! \brief a column storage, to be used with ApplySplit. Note that each
    bin id is stored as index[i] + index_base.
    Non-owning view: `index` and `row_ind` must outlive the Column. */
class Column {
 public:
  // `row_ind` may be nullptr for dense columns (row index == position).
  Column(ColumnType type, const uint32_t* index, uint32_t index_base,
         const size_t* row_ind, size_t len)
      : type_(type),
        index_(index),
        index_base_(index_base),
        row_ind_(row_ind),
        len_(len) {}
  // Number of entries stored for this column.
  size_t Size() const { return len_; }
  // Bin id in the global bin numbering (feature-local id + base offset).
  uint32_t GetGlobalBinIdx(size_t idx) const {
    return index_base_ + index_[idx];
  }
  // Bin id local to this feature.
  uint32_t GetFeatureBinIdx(size_t idx) const { return index_[idx]; }
  // column.GetFeatureBinIdx(idx) + column.GetBaseIdx(idx) ==
  // column.GetGlobalBinIdx(idx)
  uint32_t GetBaseIdx() const { return index_base_; }
  ColumnType GetType() const { return type_; }
  // Row id of the idx-th stored entry; dense columns store rows positionally.
  size_t GetRowIdx(size_t idx) const {
    // clang-tidy worries that row_ind_ might be a nullptr, which is possible,
    // but low level structure is not safe anyway.
    return type_ == ColumnType::kDenseColumn ? idx : row_ind_[idx];  // NOLINT
  }
  // Dense columns mark missing values with uint32_t max (see ColumnMatrix::Init).
  bool IsMissing(size_t idx) const {
    return index_[idx] == std::numeric_limits<uint32_t>::max();
  }
  const size_t* GetRowData() const { return row_ind_; }

 private:
  ColumnType type_;
  const uint32_t* index_;
  uint32_t index_base_;
  const size_t* row_ind_;
  const size_t len_;
};

/*! \brief a collection of columns, with support for construction from
    GHistIndexMatrix.
 */
class ColumnMatrix {
 public:
  // get number of features
  inline bst_uint GetNumFeature() const {
    return static_cast<bst_uint>(type_.size());
  }

  // construct column matrix from GHistIndexMatrix
  // Columns whose nonzero count is below sparse_threshold * nrow are stored
  // sparsely (value + row index); all others densely (one slot per row).
  inline void Init(const GHistIndexMatrix& gmat,
                   double sparse_threshold) {
    const int32_t nfeature = static_cast<int32_t>(gmat.cut.row_ptr.size() - 1);
    const size_t nrow = gmat.row_ptr.size() - 1;

    // identify type of each column
    feature_counts_.resize(nfeature);
    type_.resize(nfeature);
    std::fill(feature_counts_.begin(), feature_counts_.end(), 0);

    // Feature-local bin ids must fit in uint32_t (max is the missing marker).
    uint32_t max_val = std::numeric_limits<uint32_t>::max();
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      CHECK_LE(gmat.cut.row_ptr[fid + 1] - gmat.cut.row_ptr[fid], max_val);
    }

    gmat.GetFeatureCounts(&feature_counts_[0]);
    // classify features
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (static_cast<double>(feature_counts_[fid])
                 < sparse_threshold * nrow) {
        type_[fid] = kSparseColumn;
      } else {
        type_[fid] = kDenseColumn;
      }
    }

    // want to compute storage boundary for each feature
    // using variants of prefix sum scan
    boundary_.resize(nfeature);
    size_t accum_index_ = 0;
    size_t accum_row_ind_ = 0;
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      boundary_[fid].index_begin = accum_index_;
      boundary_[fid].row_ind_begin = accum_row_ind_;
      if (type_[fid] == kDenseColumn) {
        // dense: one slot per row
        accum_index_ += static_cast<size_t>(nrow);
        accum_row_ind_ += static_cast<size_t>(nrow);
      } else {
        // sparse: one slot per nonzero
        accum_index_ += feature_counts_[fid];
        accum_row_ind_ += feature_counts_[fid];
      }
      boundary_[fid].index_end = accum_index_;
      boundary_[fid].row_ind_end = accum_row_ind_;
    }

    index_.resize(boundary_[nfeature - 1].index_end);
    row_ind_.resize(boundary_[nfeature - 1].row_ind_end);

    // store least bin id for each feature
    index_base_.resize(nfeature);
    for (bst_uint fid = 0; fid < nfeature; ++fid) {
      index_base_[fid] = gmat.cut.row_ptr[fid];
    }

    // pre-fill index_ for dense columns
    #pragma omp parallel for
    for (int32_t fid = 0; fid < nfeature; ++fid) {
      if (type_[fid] == kDenseColumn) {
        const size_t ibegin = boundary_[fid].index_begin;
        uint32_t* begin = &index_[ibegin];
        uint32_t* end = begin + nrow;
        std::fill(begin, end, std::numeric_limits<uint32_t>::max());
        // max() indicates missing values
      }
    }

    // loop over all rows and fill column entries
    // num_nonzeros[fid] = how many nonzeros have this feature accumulated so far?
    std::vector<size_t> num_nonzeros;
    num_nonzeros.resize(nfeature);
    std::fill(num_nonzeros.begin(), num_nonzeros.end(), 0);
    for (size_t rid = 0; rid < nrow; ++rid) {
      const size_t ibegin = gmat.row_ptr[rid];
      const size_t iend = gmat.row_ptr[rid + 1];
      size_t fid = 0;
      for (size_t i = ibegin; i < iend; ++i) {
        const uint32_t bin_id = gmat.index[i];
        // bin ids within a row are ordered by feature, so advance fid
        // monotonically until bin_id falls inside fid's bin range
        while (bin_id >= gmat.cut.row_ptr[fid + 1]) {
          ++fid;
        }
        if (type_[fid] == kDenseColumn) {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[rid] = bin_id - index_base_[fid];
        } else {
          uint32_t* begin = &index_[boundary_[fid].index_begin];
          begin[num_nonzeros[fid]] = bin_id - index_base_[fid];
          row_ind_[boundary_[fid].row_ind_begin + num_nonzeros[fid]] = rid;
          ++num_nonzeros[fid];
        }
      }
    }
  }

  /* Fetch an individual column. This code should be used with
     TSOOBGX_TYPE_SWITCH to determine type of bin id's */
  inline Column GetColumn(unsigned fid) const {
    Column c(type_[fid],
             &index_[boundary_[fid].index_begin],
             index_base_[fid],
             (type_[fid] == ColumnType::kSparseColumn
                  ? &row_ind_[boundary_[fid].row_ind_begin]
                  : nullptr),
             boundary_[fid].index_end - boundary_[fid].index_begin);
    return c;
  }

 private:
  struct ColumnBoundary {
    // indicate where each column's index and row_ind is stored.
    // index_begin and index_end are logical offsets, so they should be
    // converted to actual offsets by scaling with packing_factor_
    size_t index_begin;
    size_t index_end;
    size_t row_ind_begin;
    size_t row_ind_end;
  };

  std::vector<size_t> feature_counts_;
  std::vector<ColumnType> type_;
  SimpleArray<uint32_t> index_;  // index_: may store smaller integers; needs padding
  SimpleArray<size_t> row_ind_;
  std::vector<ColumnBoundary> boundary_;

  // index_base_[fid]: least bin id for feature fid
  std::vector<uint32_t> index_base_;
};

}  // namespace common
}  // namespace tsoobgx
#endif  // TSOOBGX_COMMON_COLUMN_MATRIX_H_
GB_unaryop__minv_int8_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_int8_uint16
// op(A') function:  GB_tran__minv_int8_uint16

// C type:   int8_t
// A type:   uint16_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = GB_IMINV_SIGNED (aij, 8)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: signed integer multiplicative inverse, 8-bit result
#define GB_OP(z, x) \
    z = GB_IMINV_SIGNED (x, 8) ;

// casting
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_int8_uint16
(
    int8_t *restrict Cx,          // output array, anz entries
    const uint16_t *restrict Ax,  // input array, anz entries
    int64_t anz,                  // number of entries
    int nthreads                  // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body is generated by the shared transpose template,
    // parameterized by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
naive_math_impl.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <cmath>
#include <cstdlib>  // malloc/free used by the gemm helpers below
#include <cstring>  // memset used throughout

// Re-packs an M x K row-major matrix `input` (row stride `ldin`) into the
// "c4" interleaved layout: groups of 4 rows are interleaved column by column.
// Rows beyond M are padded with zeros; if `pack_k`, K is also rounded up to a
// multiple of 4 with zero padding.
template <typename type>
static void basic_trans_mat_to_c4(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 3) / 4 * 4;
  int k_round = (K + 3) / 4 * 4;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 4;
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 4 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    if (4 * (i + 1) - M > 0) {
      // Out-of-range rows read from the zero buffer (fall-through intended).
      switch (4 * (i + 1) - M) {
        case 3:
          in1 = zero_buf;
        case 2:
          in2 = zero_buf;
        case 1:
          in3 = zero_buf;
        default:
          break;
      }
    }
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
    }
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}

// Same as basic_trans_mat_to_c4 but with groups of 8 rows ("c8" layout).
template <typename type>
static void basic_trans_mat_to_c8(const type* input,
                                  type* output,
                                  const int ldin,
                                  const int M,
                                  const int K,
                                  bool pack_k) {
  const int m_round = (M + 7) / 8 * 8;
  int k_round = (K + 7) / 8 * 8;
  if (!pack_k) {
    k_round = K;
  }
  const int m_loop = m_round / 8;
  // BUGFIX: was `type zero_buf[K];` — a variable-length array, which is not
  // ISO C++ (the c4 variant already heap-allocates). Use new[]/delete[].
  type* zero_buf = new type[K];
  memset(zero_buf, 0, K * sizeof(type));
  for (int i = 0; i < m_loop; ++i) {
    const type* in0 = input + i * 8 * ldin;
    const type* in1 = in0 + ldin;
    const type* in2 = in1 + ldin;
    const type* in3 = in2 + ldin;
    const type* in4 = in3 + ldin;
    const type* in5 = in4 + ldin;
    const type* in6 = in5 + ldin;
    const type* in7 = in6 + ldin;
    if (8 * (i + 1) - M > 0) {
      // Out-of-range rows read from the zero buffer (fall-through intended).
      switch (8 * (i + 1) - M) {
        case 7:
          in1 = zero_buf;
        case 6:
          in2 = zero_buf;
        case 5:
          in3 = zero_buf;
        case 4:
          in4 = zero_buf;
        case 3:
          in5 = zero_buf;
        case 2:
          in6 = zero_buf;
        case 1:
          in7 = zero_buf;
        default:
          break;
      }
    }
    for (int j = 0; j < K; ++j) {
      *output++ = *in0++;
      *output++ = *in1++;
      *output++ = *in2++;
      *output++ = *in3++;
      *output++ = *in4++;
      *output++ = *in5++;
      *output++ = *in6++;
      *output++ = *in7++;
    }
    for (int j = K; j < k_round; ++j) {
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
      *output++ = static_cast<type>(0);
    }
  }
  delete[] zero_buf;
}

// Reference GEMM producing output in c4 layout:
// C = act(alpha * op(A) * op(B) + beta * C + bias), then re-packed via
// basic_trans_mat_to_c4. `bias` (per-row) is read only when flag_bias.
template <typename type, typename type2>
static void basic_gemm_c4(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c4
  basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}

// Same as basic_gemm_c4 but the result is re-packed to c8 layout.
template <typename type, typename type2>
static void basic_gemm_c8(bool trans_a,
                          bool trans_b,
                          int m,
                          int n,
                          int k,
                          type2 alpha,
                          const type* a,
                          int lda,
                          const type* b,
                          int ldb,
                          type2 beta,
                          type2* c,
                          int ldc,
                          const type2* bias,
                          bool flag_bias = false,
                          bool flag_relu = false) {
  type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2)));
  memset(tmp_c, 0, m * ldc * sizeof(type2));
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data;
      if (flag_relu) {
        tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        tmp_c[i * ldc + j] = tmp;
      }
    }
  }
  //! trans c to c8 (comment fixed: previously said c4)
  basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false);
  free(tmp_c);
}

// Reference GEMM: C = act(alpha * op(A) * op(B) + beta * C + bias).
// Row-major; `bias` is per-output-row and read only when flag_bias.
template <typename type, typename type2>
static void basic_gemm(bool trans_a,
                       bool trans_b,
                       int m,
                       int n,
                       int k,
                       type2 alpha,
                       const type* a,
                       int lda,
                       const type* b,
                       int ldb,
                       type2 beta,
                       type2* c,
                       int ldc,
                       const type2* bias,
                       bool flag_bias = false,
                       bool flag_relu = false) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    for (int j = 0; j < n; ++j) {
      auto sum = static_cast<type2>(0);
      for (int l = 0; l < k; ++l) {
        type av;
        type bv;
        if (trans_a) {
          av = a[l * lda + i];
        } else {
          av = a[i * lda + l];
        }
        if (trans_b) {
          bv = b[j * ldb + l];
        } else {
          bv = b[l * ldb + j];
        }
        sum += av * bv;
      }
      type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data;
      if (flag_relu) {
        c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0;
      } else {
        c[i * ldc + j] = tmp;
      }
    }
  }
}

// Reference GEMV: c = act(alpha * op(A) * b + beta * c + bias).
// flag_act: 0 = none, 1 = relu, 2 = relu6 (clamped at `six`),
// 4 = leaky relu with slope `leakey_relu_alpha`.
// NOTE(review): any other positive flag_act leaves c[i] unassigned — confirm
// callers only pass {0,1,2,4}.
template <typename type, typename type2>
static void basic_gemv(int m,
                       int k,
                       const type* a,
                       const type* b,
                       const type2* bias,
                       type2* c,
                       type2 alpha,
                       type2 beta,
                       bool trans_a = false,
                       bool flag_bias = false,
                       int flag_act = 0,  // was `= false`; 0 is the honest int default
                       float six = 6.f,
                       float leakey_relu_alpha = 1.f) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (int i = 0; i < m; ++i) {
    auto bias_data = static_cast<type2>(0);
    if (flag_bias) {
      bias_data = bias[i];
    }
    auto sum = static_cast<type2>(0);
    for (int j = 0; j < k; ++j) {
      type av;
      if (trans_a) {
        av = a[j * m + i];
      } else {
        av = a[i * k + j];
      }
      sum += av * b[j];
    }
    type2 tmp = alpha * sum + beta * c[i] + bias_data;
    if (flag_act > 0) {
      if (flag_act == 1) {  // relu
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
      } else if (flag_act == 2) {  // relu 6
        c[i] = tmp > (type2)0 ? tmp : (type2)0;
        c[i] = c[i] < six ? c[i] : six;  // ut compute
      } else if (flag_act == 4) {  // leakey relu
        c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp;
      }
    } else {
      c[i] = tmp;
    }
  }
}

/**
 * \brief basic direct convolution function
 */
//! for float, dtype1 and type2 is float
//!
 for int8: dtype1 is char, dtype2 is int
// Naive grouped direct convolution used as a reference implementation.
// act_type: 0 = none, 1 = relu, 2 = relu6 (clamped at `six`),
// 4 = leaky relu with slope `scale`.
template <typename Dtype1, typename Dtype2>
static void conv_basic(const Dtype1* din,
                       Dtype2* dout,
                       int num,
                       int chout,
                       int hout,
                       int wout,
                       int chin,
                       int hin,
                       int win,
                       const Dtype1* weights,
                       const Dtype2* bias,
                       int group,
                       int kernel_w,
                       int kernel_h,
                       int stride_w,
                       int stride_h,
                       int dila_w,
                       int dila_h,
                       int pad_w,
                       int pad_h,
                       bool flag_bias,
                       int act_type,
                       float six = 6.f,
                       float scale = 1.f) {
  // beta is fixed to 0: existing dout content is discarded (see note below).
  Dtype2 beta = 0;
  auto src_data = din;
  auto dst_data_ref = dout;
  auto weights_data = weights;
  auto with_bias = flag_bias;
  auto bias_data = bias;

  int in_num = num;
  int out_channels = chout;
  int out_h = hout;
  int out_w = wout;

  int in_channel = chin;
  int in_h = hin;
  int in_w = win;
  int out_c_group = out_channels / group;
  int in_c_group = in_channel / group;

  for (int n = 0; n < in_num; ++n) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < out_h; ++oh) {
          for (int ow = 0; ow < out_w; ++ow) {
            int out_idx = n * group * out_c_group * out_h * out_w +
                          g * out_c_group * out_h * out_w + oc * out_h * out_w +
                          oh * out_w + ow;
            Dtype2 bias_d = with_bias ? (bias_data[g * out_c_group + oc]) : 0;
            // NOTE(review): with beta == 0 this still reads dout before it is
            // written; if dout is uninitialized float memory, garbage * 0 can
            // produce NaN — confirm callers zero/initialize dout.
            dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta;
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int kh = 0; kh < kernel_h; ++kh) {
                for (int kw = 0; kw < kernel_w; ++kw) {
                  int iw = ow * stride_w - pad_w + kw * (dila_w);
                  int ih = oh * stride_h - pad_h + kh * (dila_h);
                  if (iw < 0 || iw >= in_w) continue;
                  if (ih < 0 || ih >= in_h) continue;
                  int iidx = n * in_channel * in_h * in_w +
                             g * in_c_group * in_h * in_w + ic * in_h * in_w +
                             ih * in_w + iw;
                  int widx =
                      g * out_c_group * in_c_group * kernel_h * kernel_w +
                      oc * in_c_group * kernel_h * kernel_w +
                      ic * kernel_h * kernel_w + kh * kernel_w + kw;
                  dst_data_ref[out_idx] +=
                      src_data[iidx] * weights_data[widx];
                }
              }
            }
            if (act_type > 0) {
              // 1-relu 2-relu6 4-leakyrelu
              if (act_type == 1) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
              } else if (act_type == 2) {
                dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)0;
                dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six
                                            ? dst_data_ref[out_idx]
                                            : (Dtype2)six;
              } else if (act_type == 4) {
                dst_data_ref[out_idx] =
                    dst_data_ref[out_idx] > (Dtype2)0
                        ? dst_data_ref[out_idx]
                        : (Dtype2)(dst_data_ref[out_idx] * scale);
              } else {
                printf("this act type: %d does not support \n", act_type);
              }
            }
          }
        }
      }
    }
  }
}

// Adds a per-channel bias and optionally applies relu, in place over
// `channel` blocks of `channel_size` contiguous elements.
template <typename Dtype>
static void fill_bias_relu(Dtype* tensor,
                           const Dtype* bias,
                           int channel,
                           int channel_size,
                           bool flag_bias,
                           bool flag_relu) {
  Dtype* data = tensor;
  for (int j = 0; j < channel; ++j) {
    Dtype bias_c = flag_bias ? bias[j] : 0;
    for (int i = 0; i < channel_size; i++) {
      data[i] += bias_c;
      if (flag_relu) {
        data[i] = data[i] > 0 ? data[i] : 0.f;
      }
    }
    data += channel_size;
  }
}

// In-place relu over `size` elements.
template <typename Dtype>
static void do_relu(Dtype* tensor, int size) {
  for (int j = 0; j < size; ++j) {
    tensor[j] = tensor[j] > 0 ? tensor[j] : (Dtype)0;
  }
}

// true iff 0 <= a < b; the unsigned cast folds both bound checks into one.
inline bool is_a_ge_zero_and_a_lt_b(int a, int b) {
  return static_cast<unsigned>(a) < static_cast<unsigned>(b);
}

// Scatters a column buffer (im2col layout) back into the image, accumulating
// overlapping contributions. Inverse of im2col; used by deconvolution.
template <typename Dtype>
static void col2im(const Dtype* data_col,
                   const int channels,
                   const int height,
                   const int width,
                   const int kernel_h,
                   const int kernel_w,
                   const int pad_h0,
                   const int pad_h1,
                   const int pad_w0,
                   const int pad_w1,
                   const int stride_h,
                   const int stride_w,
                   const int dilation_h,
                   const int dilation_w,
                   Dtype* data_im) {
  memset(data_im, 0, height * width * channels * sizeof(Dtype));
  const int output_h =
      (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) /
          stride_h +
      1;
  const int output_w =
      (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) /
          stride_w +
      1;
  const int channel_size = height * width;
  for (int channel = channels; channel--; data_im += channel_size) {
    for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) {
      for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) {
        int input_row = -pad_h0 + kernel_row * dilation_h;
        for (int output_rows = output_h; output_rows; output_rows--) {
          if (!is_a_ge_zero_and_a_lt_b(input_row, height)) {
            // whole output row maps outside the image: skip it
            data_col += output_w;
          } else {
            int input_col = -pad_w0 + kernel_col * dilation_w;
            for (int output_col = output_w; output_col; output_col--) {
              if (is_a_ge_zero_and_a_lt_b(input_col, width)) {
                data_im[input_row * width + input_col] += *data_col;
              }
              data_col++;
              input_col += stride_w;
            }
          }
          input_row += stride_h;
        }
      }
    }
  }
}

//! for float, dtype1 and type2 is float
//!
for int8, dytpe1 is char, dtype2 is int template <typename Dtype1, typename Dtype2> void deconv_basic(const Dtype1* din, Dtype2* dout, int num, int chout, int hout, int wout, int chin, int hin, int win, const Dtype1* weights, const Dtype2* bias, int group, int kernel_w, int kernel_h, int stride_w, int stride_h, int dila_w, int dila_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, bool flag_bias, bool flag_relu) { int m = chout * kernel_w * kernel_h / group; int n = hin * win; int k = chin / group; int group_size_in = win * hin * chin / group; int group_size_coldata = m * n; int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group); bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) && (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) && (dila_h == 1); Dtype2* workspace_ptr = static_cast<Dtype2*>(malloc(sizeof(float) * m * n * group)); for (int i = 0; i < num; ++i) { const Dtype1* din_batch = din + i * chin * hin * win; Dtype2* dout_batch = dout + i * chout * hout * wout; Dtype2* col_data = workspace_ptr; if (flag_1x1s1p1) { col_data = dout_batch; } memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group); for (int g = 0; g < group; ++g) { const Dtype1* din_group = din_batch + g * group_size_in; const Dtype1* weights_group = weights + g * group_size_weights; Dtype2* coldata_group = col_data + g * group_size_coldata; basic_gemm<Dtype1, Dtype2>(true, false, m, n, k, 1, weights_group, m, din_group, n, 0, coldata_group, n, nullptr, false, (!flag_bias && flag_relu)); } if (!flag_1x1s1p1) { col2im(col_data, chout, hout, wout, kernel_h, kernel_w, pad_h0, pad_h1, pad_w0, pad_w1, stride_h, stride_w, dila_h, dila_w, dout_batch); } //! 
add bias if (flag_bias) { fill_bias_relu( dout_batch, bias, chout, wout * hout, flag_bias, flag_relu); } } free(workspace_ptr); } float deformable_bilinear(const float* bottom_data, const int data_width, const int height, const int width, float h, float w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; if (h_low >= height - 1) { h_high = h_low = height - 1; h = static_cast<float>(h_low); } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = static_cast<float>(w_low); } else { w_high = w_low + 1; } float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh; float hw = 1 - lw; float v1 = bottom_data[h_low * data_width + w_low]; float v2 = bottom_data[h_low * data_width + w_high]; float v3 = bottom_data[h_high * data_width + w_low]; float v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw; float w2 = hh * lw; float w3 = lh * hw; float w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } //! for float, dtype1 and type2 is float //! 
 for int8: dtype1 is char, dtype2 is int
// Naive deformable convolution reference: each kernel tap samples the input
// at a learned fractional offset via bilinear interpolation; optionally
// modulated by a per-tap mask.
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
                           const float* offset_data,
                           const float* mask_data,
                           Dtype2* out_data,
                           int num,
                           int chout,
                           int hout,
                           int wout,
                           int chin,
                           int hin,
                           int win,
                           const Dtype1* weights,
                           const Dtype2* bias,
                           int group,
                           int kernel_w,
                           int kernel_h,
                           int stride_w,
                           int stride_h,
                           int dila_w,
                           int dila_h,
                           int pad_w,
                           int pad_h,
                           bool flag_bias,
                           bool flag_relu,
                           bool modulated) {
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            // NOTE(review): accumulates on top of the existing out_data
            // content — assumes callers pre-zero the output; confirm.
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // offset layout: [n][g][2 * kernel_size][hout][wout],
                  // h-offset and w-offset interleaved per tap
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // NOTE(review): `kernel_w * dila_w` / `kernel_h * dila_h`
                  // use the kernel SIZE, not the tap index (`fw * dila_w` /
                  // `fh * dila_h`), so every tap samples relative to the same
                  // base point. Standard deformable conv uses the tap index —
                  // looks like a bug; confirm against the kernel under test
                  // before changing (same expressions feed map_h/map_w below).
                  const float iw =
                      ow * stride_w - pad_w + kernel_w * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + kernel_h * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    const float map_h = kernel_h * dila_h + offset_h;
                    const float map_w = kernel_w * dila_w + offset_w;
                    // window starting at the (un-offset) receptive-field
                    // origin; map_h/map_w are relative to that origin
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win + (ow * stride_w - pad_w);
                    float val = deformable_bilinear(
                        in_data_offset, win, cur_height, cur_width, map_h,
                        map_w);
                    if (modulated) {
                      // use mask: layout [n][g][kernel_size][hout][wout]
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] =
                  out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
errors.h
#ifndef __errors_h__
#define __errors_h__

#include <cstdlib>  // std :: exit (was previously pulled in transitively)
#include <iostream>
#include <string>

// Process exit codes used by the fatal-error helpers below.
static constexpr int ERROR_POSITIVE = 1;          ///< A value that must be positive is not
static constexpr int ERROR_RANGE = 2;             ///< A value falls outside its allowed range
static constexpr int ERROR_ACCURACY = 3;          ///< Invalid accuracy specification
static constexpr int ERROR_EXACT = 4;             ///< 'exact' accuracy requires an odd size
static constexpr int ERROR_INFINITE = 5;          ///< Finite beta is not supported
static constexpr int ERROR_WEIGHTS = 6;           ///< Weights file not found
static constexpr int ERROR_PATTERN = 7;           ///< Input pattern file not found
static constexpr int ERROR_PROTOCOL = 8;          ///< Invalid protocol name
static constexpr int ERROR_MESSAGES = 91;         ///< Messages file not found
static constexpr int ERROR_INVALID_MESSAGES = 92; ///< Messages file is malformed

// ERRORS 101-105 are related to raising errors in parse_args.h

static constexpr int ERROR_MAGNETIZATION = 111;   ///< Invalid magnetization value
static constexpr int ERROR_ATANHERF_FILE = 112;   ///< atanherf interpolation file missing

/**
* @brief Raise error because N is not positive.
*
* @param N The given value of N.
*/
inline void error_Npositive (const long int & N)
{
  std :: cerr << "N must be positive; given: " << N << std :: endl;
  std :: exit(ERROR_POSITIVE);
}

/**
* @brief Raise error because K is not positive.
*
* @param K The given value of K.
*/
inline void error_Kpositive (const int & K)
{
  std :: cerr << "K must be positive; given: " << K << std :: endl;
  std :: exit(ERROR_POSITIVE);
}

/**
* @brief Raise error because max_iters is negative.
*
* @param max_iters The given value of max_iters.
*/
inline void error_maxiters (const long int & max_iters)
{
  std :: cerr << "max_iters must be non-negative; given: " << max_iters << std :: endl;
  std :: exit(ERROR_POSITIVE);
}

/**
* @brief Raise error because max_steps is negative.
*
* @param max_steps The given value of max_steps.
*/
inline void error_maxsteps (const long int & max_steps)
{
  std :: cerr << "max_steps must be non-negative; given: " << max_steps << std :: endl;
  std :: exit(ERROR_POSITIVE);
}

/**
* @brief Raise error because damping is not bounded in [0, 1).
*
* @param damping The given value of damping.
*/
inline void error_damping (const double & damping)
{
  std :: cerr << "damping must be in [0,1); given: " << damping << std :: endl;
  std :: exit(ERROR_RANGE);
}

/**
* @brief Raise error because randfact is not bounded in [0, 1).
*
* @param randfact The given value of randfact.
*/
inline void error_randfact (const double & randfact)
{
  std :: cerr << "randfact must be in [0,1); given: " << randfact << std :: endl;
  std :: exit(ERROR_RANGE);
}

/**
* @brief Raise error because accuracy has not a valid value.
*
* @param accuracy The given value of accuracy.
*/
inline void error_accuracy (const std :: string & accuracy)
{
  std :: cerr << "Invalid accuracy variable given. Given : " << accuracy << std :: endl;
  std :: exit(ERROR_ACCURACY);
}

/**
* @brief Raise error because too many accuracies were given (exactly two are needed).
*
*/
inline void error_num_accuracy ()
{
  std :: cerr << "Too many accuracy variables given. Needed two." << std :: endl;
  std :: exit(ERROR_ACCURACY);
}

/**
* @brief Raise error because accuracy1 has not a valid value.
*
* @param accuracy1 The given value of accuracy1.
*/
inline void error_accuracy1 (const std :: string & accuracy1)
{
  std :: cerr << "accuracy1 must be one of 'exact', 'accurate' or 'none'; given: " << accuracy1 << std :: endl;
  std :: exit(ERROR_ACCURACY);
}

/**
* @brief Raise error because accuracy2 has not a valid value.
*
* @param accuracy2 The given value of accuracy2.
*/
inline void error_accuracy2 (const std :: string & accuracy2)
{
  std :: cerr << "accuracy2 must be one of 'exact', 'accurate' or 'none'; given: " << accuracy2 << std :: endl;
  std :: exit(ERROR_ACCURACY);
}

/**
* @brief Raise error because with accuracy1 == 'exact' the value of N must be odd.
*
* @param N The given value of N.
*/
inline void error_Nexact (const int & N)
{
  std :: cerr << "when accuracy1 == 'exact', N must be odd; given: " << N << std :: endl;
  std :: exit(ERROR_EXACT);
}

/**
* @brief Raise error because with accuracy2 == 'exact' the value of K must be odd.
*
* @param K The given value of K.
*/
inline void error_Kexact (const int & K)
{
  std :: cerr << "when accuracy2 == 'exact', K must be odd; given: " << K << std :: endl;
  std :: exit(ERROR_EXACT);
}

/**
* @brief Raise error because beta is finite (not yet supported).
*
* @param beta The given value of beta.
*/
inline void error_infinite (const double & beta)
{
#ifdef _OPENMP
#pragma omp single
#endif
  // omp single: print/exit once even when called from a parallel region.
  std :: cerr << "finite beta not yet supported; given: " << beta << std :: endl;
  std :: exit(ERROR_INFINITE);
}

/**
* @brief Raise error because weight filename is not found.
*
* @param filename The given value of filename.
*/
inline void error_message_weights (const std :: string & filename)
{
#ifdef _OPENMP
#pragma omp single
#endif
  // omp single: print/exit once even when called from a parallel region.
  std :: cerr << "Weights file not found! Given : " << filename << std :: endl;
  std :: exit(ERROR_WEIGHTS);
}

/**
* @brief Raise error because input filename is not found.
*
* @param filename The given value of filename.
*/
inline void error_pattern (const std :: string & filename)
{
  std :: cerr << "Input file not found! Given: " << filename << std :: endl;
  std :: exit(ERROR_PATTERN);
}

/**
* @brief Raise error because protocol type is invalid.
*
* @param prot The given value of protocol type.
*/
inline void error_protocol (const std :: string & prot)
{
  std :: cerr << "Invalid protocol parameter. Given: " << prot << ". Possible values are \"scoping\", \"pseudo_reinforcement\", \"free_scoping\" and \"standard_reinforcement\"" << std :: endl;
  // Fix: previously exited with ERROR_PATTERN (7); ERROR_PROTOCOL (8)
  // was declared above but never used anywhere.
  std :: exit(ERROR_PROTOCOL);
}

/**
* @brief Raise error because messages filename is not found.
*
* @param filename The given value of filename.
*/
inline void error_messages (const std :: string & filename)
{
  std :: cerr << "Messages file not found! Given: " << filename << std :: endl;
  std :: exit(ERROR_MESSAGES);
}

/**
* @brief Raise error because messages filename is not well formatted.
*
* @param filename The given value of filename.
*/
inline void error_invalid_messages (const std :: string & filename)
{
  std :: cerr << "Invalid messages file! Given: " << filename << std :: endl;
  std :: exit(ERROR_INVALID_MESSAGES);
}

/**
* @brief Raise error because Mag parameter is not valid.
*
* @param mag The given value of mag.
*/
inline void error_magnetization (const int & mag)
{
  std :: cerr << "Invalid magnetization found. Given : " << mag << std :: endl;
  std :: exit(ERROR_MAGNETIZATION);
}

/**
* @brief Raise error because atanherf interpolation filename is not found.
* It suggests how to download it.
*
*/
inline void error_atanherf_file ()
{
  std :: cerr << "Atanherf interpolation coefficients file not found."
                 " Please download it before running to prevent some troubles."
                 " The appropriated script could be used and you can find it in the 'scripts' directory."
                 " Just run 'python download_atanherf.py'"
              << std :: endl;
  std :: exit(ERROR_ATANHERF_FILE);
}

#endif // __errors_h__
depth_first_search.c
#include <omp.h> #include <time.h> #include "problem.h" #include "depth_first_search.h" struct ProblemSolution run_recursive_depth_first_search(const struct ProblemInstance* instance) { struct ProblemSolution best_solution = { .array = malloc(sizeof(int) * instance->n), .size = instance->n, .cost = 0, .is_valid = false, }; // would be deallocated by do_depth_first_search struct ProblemSolution* blank_solution = malloc(sizeof(struct ProblemSolution)); blank_solution->array = malloc(sizeof(int) * instance->n); blank_solution->size = instance->n; blank_solution->cost = 0; blank_solution->is_valid = false; for (int i = 0; i < instance->n; i++) { blank_solution->array[i] = 0; } #pragma omp parallel { #pragma omp single { do_recursive_depth_first_search(instance, &best_solution, blank_solution, instance->a, 0); } } if (best_solution.is_valid == false) { printf("Panic! Depth First Search has faild. Algorithm is not correct.\n"); } // !! we know that at least one cut is exist // !! that is why the below pointer dereference must be valid return best_solution; } void do_recursive_depth_first_search ( const struct ProblemInstance* instance, struct ProblemSolution* best_solution, struct ProblemSolution* current_solution, int graph_capacity, int depth ) { struct ProblemSolution* include_solution; struct ProblemSolution* exclude_solution; if (graph_capacity == 0 || depth == instance->n) { calculate_cut_cost(current_solution, instance); if (best_solution->is_valid == false || best_solution->cost > current_solution->cost) { // temporary allocates memory on the heap validate_solution(current_solution, instance); if (current_solution->is_valid) { #pragma omp critical { best_solution->is_valid = true; best_solution->cost = current_solution->cost; for (int i = 0; i < instance->n; i++) { best_solution->array[i] = current_solution->array[i]; } } } } free(current_solution->array); free(current_solution); return; } include_solution = malloc(sizeof(struct ProblemSolution)); 
include_solution->array = malloc(sizeof(int) * instance->n); include_solution->size = instance->n; include_solution->cost = 0; include_solution->is_valid = false; exclude_solution = malloc(sizeof(struct ProblemSolution)); exclude_solution->array = malloc(sizeof(int) * instance->n); exclude_solution->size = instance->n; exclude_solution->cost = 0; exclude_solution->is_valid = false; for (int i = 0; i < instance->n; i++) { include_solution->array[i] = current_solution->array[i]; exclude_solution->array[i] = current_solution->array[i]; } free(current_solution->array); free(current_solution); include_solution->array[depth] = 1; exclude_solution->array[depth] = 0; depth++; #pragma omp task { do_recursive_depth_first_search(instance, best_solution, include_solution, graph_capacity - 1, depth); } #pragma omp task { do_recursive_depth_first_search(instance, best_solution, exclude_solution, graph_capacity , depth); } }
data.h
/*!
 * Copyright (c) 2015 by Contributors
 * \file data.h
 * \brief The input data structure of xgboost.
 * \author Tianqi Chen
 */
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_

#include <dmlc/base.h>
#include <dmlc/data.h>
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include "../../src/common/span.h"
#include "../../src/common/group_data.h"
#include "../../src/common/host_device_vector.h"

namespace xgboost {
// forward declare learner.
class LearnerImpl;

/*! \brief data type accepted by xgboost interface */
enum DataType {
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4
};

/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of rows in the data */
  uint64_t num_row_{0};
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};
  /*! \brief label of each instance */
  HostDeviceVector<bst_float> labels_;
  /*!
   * \brief specified root index of each instance,
   *  can be used for multi task setting
   */
  std::vector<bst_uint> root_index_;
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_uint> group_ptr_;
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  HostDeviceVector<bst_float> base_margin_;
  /*! \brief version flag, used to check version of this info */
  static const int kVersion = 3;
  /*! \brief version that contains qid field */
  static const int kVersionWithQid = 2;
  /*! \brief default constructor */
  MetaInfo()  = default;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight. Defaults to 1.0f when no weights were provided.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ?  weights_.HostVector()[i] : 1.0f;
  }
  /*!
   * \brief Get the root index of i-th instance.
   * \param i Instance index.
   * \return The pre-defined root index of i-th instance (0 when unset).
   */
  inline unsigned GetRoot(size_t i) const {
    return root_index_.size() != 0 ? root_index_[i] : 0U;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  // Lazily computed and memoized in label_order_cache_; recomputed only when
  // the number of labels has changed since the last call.
  inline const std::vector<size_t>& LabelAbsSort() const {
    if (label_order_cache_.size() == labels_.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels_.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels_.HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
              [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   */
  void SetInfo(const char* key, std::string const& interface_str);

 private:
  /*! \brief argsort of labels; mutable so LabelAbsSort() can memoize while const */
  mutable std::vector<size_t> label_order_cache_;
};

/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_uint index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  Entry(bst_uint index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief reversely compare feature values */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<size_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;
  // First row id covered by this page (pages may be slices of a larger matrix).
  size_t base_rowid;
  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  /*! \brief get i-th row from the batch */
  inline Inst operator[](size_t i) const {
    const auto& data_vec = data.HostVector();
    const auto& offset_vec = offset.HostVector();
    size_t size;
    // in distributed mode, some partitions may not get any instance for a feature. Therefore
    // we should set the size as zero
    if (rabit::IsDistributed() && i + 1 >= offset_vec.size()) {
      size = 0;
    } else {
      size = offset_vec[i + 1] - offset_vec[i];
    }
    return {data_vec.data() + offset_vec[i],
            static_cast<Inst::index_type>(size)};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }
  /*! \return number of instance in the page */
  // offset always carries a leading 0 sentinel (see Clear()), hence the -1.
  inline size_t Size() const {
    return offset.Size() - 1;
  }
  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }
  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    offset_vec.push_back(0);
    data.HostVector().clear();
  }

  // Build the CSC transpose of this (CSR) page; two OpenMP passes: first
  // count per-column budgets, then fill the storage.
  SparsePage GetTranspose(int num_columns) const {
    SparsePage transpose;
    common::ParallelGroupBuilder<Entry>
        builder(&transpose.offset.HostVector(), &transpose.data.HostVector());
    const int nthread = omp_get_max_threads();
    builder.InitBudget(num_columns, nthread);
    long batch_size = static_cast<long>(this->Size());  // NOLINT(*)
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) {  // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.AddBudget(inst[j].index, tid);
      }
    }
    builder.InitStorage();
#pragma omp parallel for schedule(static)
    for (long i = 0; i < batch_size; ++i) {  // NOLINT(*)
      int tid = omp_get_thread_num();
      auto inst = (*this)[i];
      for (bst_uint j = 0; j < inst.size(); ++j) {
        builder.Push(
            inst[j].index,
            Entry(static_cast<bst_uint>(this->base_rowid + i), inst[j].fvalue),
            tid);
      }
    }
    return transpose;
  }

  // Sort the entries of every row segment by feature value, in parallel.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
        std::sort(
            this->data.HostVector().begin() + this->offset.HostVector()[i],
            this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
            Entry::CmpValue);
      }
    }
  }
  /*!
   * \brief Push row block into the page.
   * \param batch the row batch.
   */
  void Push(const dmlc::RowBlock<uint32_t>& batch);
  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
  /*!
   * \brief Push one instance into page
   *  \param inst an instance row
   */
  void Push(const Inst &inst);

  // Non-const overload of Size() above; same computation.
  size_t Size() { return offset.Size() - 1; }
};

// Thin wrappers that tag a SparsePage with its storage layout (CSC /
// sorted-CSC) so BatchSet<T> can dispatch on the page type.
class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

// Abstract backing implementation for BatchIterator<T>.
template<typename T>
class BatchIteratorImpl {
 public:
  virtual ~BatchIteratorImpl() {}
  virtual T& operator*() = 0;
  virtual const T& operator*() const = 0;
  virtual void operator++() = 0;
  virtual bool AtEnd() const = 0;
};

// Forward iterator over batches; shares ownership of the impl so copies of
// the iterator remain valid.
template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }

  void operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
  }

  T& operator*() {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  // NOTE: rhs is ignored — this iterator only supports comparison against
  // the end() sentinel; "not equal" means "not yet at end".
  bool operator!=(const BatchIterator& rhs) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }

  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};

// Range object wrapping a begin iterator, enabling range-based for loops.
template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(begin_iter) {}
  BatchIterator<T> begin() { return begin_iter_; }
  // The end iterator is a null sentinel; see BatchIterator::operator!=.
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }

 private:
  BatchIterator<T> begin_iter_;
};

/*!
 * \brief This is data structure that user can pass to DMatrix::Create
 *  to create a DMatrix for training, user can create this data structure
 *  for customized Data Loading on single machine.
 *
 *  On distributed setting, usually an customized dmlc::Parser is needed instead.
 */
template<typename T>
class DataSource : public dmlc::DataIter<T> {
 public:
  /*!
   * \brief Meta information about the dataset
   * The subclass need to be able to load this correctly from data.
   */
  MetaInfo info;
};

/*!
 * \brief Internal data structured used by XGBoost during training.
 *  There are two ways to create a customized DMatrix that reads in user defined-format.
 *
 *  - Provide a dmlc::Parser and pass into the DMatrix::Create
 *  - Alternatively, if data can be represented by an URL, define a new dmlc::Parser and register by DMLC_REGISTER_DATA_PARSER;
 *      - This works best for user defined data input source, such as data-base, filesystem.
 *  - Provide a DataSource, that can be passed to DMatrix::Create
 *      This can be used to re-use inmemory data structure into DMatrix.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix() = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;
  /**
   * \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
   */
  // Only the three explicit specializations after this class are defined.
  template<typename T>
  BatchSet<T> GetBatches();
  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief get column density */
  virtual float GetColDensity(size_t cidx) = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix() = default;
  /*!
   * \brief Save DMatrix to local file.
   *  The saved file only works for non-sharded dataset(single machine training).
   *  This API is deprecated and dis-encouraged to use.
   * \param fname The file name to be saved.
   * \return The created DMatrix.
   */
  virtual void SaveToLocalFile(const std::string& fname);
  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *   By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto",
                       const size_t page_size = kPageSize);
  /*!
   * \brief create a new DMatrix, by wrapping a row_iterator, and meta info.
   * \param source The source iterator of the data, the create function takes ownership of the source.
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *     This can be nullptr for common cases, and in-memory mode will be used.
   * \return a Created DMatrix.
   */
  static DMatrix* Create(std::unique_ptr<DataSource<SparsePage>>&& source,
                         const std::string& cache_prefix = "");
  /*!
   * \brief Create a DMatrix by loading data from parser.
   *  Parser can later be deleted after the DMatrix i created.
   * \param parser The input data parser
   * \param cache_prefix The path to prefix of temporary cache file of the DMatrix when used in external memory mode.
   *     This can be nullptr for common cases, and in-memory mode will be used.
   * \param page_size Page size for external memory.
   * \sa dmlc::Parser
   * \note dmlc-core provides efficient distributed data parser for libsvm format.
   *  User can create and register customized parser to load their own format using DMLC_REGISTER_DATA_PARSER.
   *  See "dmlc-core/include/dmlc/data.h" for detail.
   * \return A created DMatrix.
   */
  static DMatrix* Create(dmlc::Parser<uint32_t>* parser,
                         const std::string& cache_prefix = "",
                         const size_t page_size = kPageSize);
  /*! \brief page size 32 MB */
  static const size_t kPageSize = 32UL << 20UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
};

// Map the requested page type onto the corresponding protected getter.
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches() {
  return GetRowBatches();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches() {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches() {
  return GetSortedColumnBatches();
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
}
#endif  // XGBOOST_DATA_H_
smallppm_exp.h
// expanded smallppm (code is exactly the same as smallppm.cpp but with more comments)
#include <cmath>    // smallppm, Progressive Photon Mapping by T. Hachisuka
#include <cstdio>   // fprintf/fopen/fclose (previously pulled in transitively)
#include <cstdlib>  // originally smallpt, a path tracer by Kevin Beason, 2008
#include <fstream>
#include <iostream>  // Usage: ./smallppm 100000 && xv image.ppm

namespace photonmap {
namespace smallppm {
constexpr double PI = 3.14159265358979;  // ^^^^^^:number of photons emitted
constexpr double ALPHA = 0.7;            // the alpha parameter of PPM

// Halton sequence with reverse permutation
int primes[61] = {2,   3,   5,   7,   11,  13,  17,  19,  23,  29,  31,  37,  41,  43,  47,  53,
                  59,  61,  67,  71,  73,  79,  83,  89,  97,  101, 103, 107, 109, 113, 127, 131,
                  137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
                  227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283};

// Reverse permutation of digit i in base p (identity for 0).
inline int rev(const int i, const int p) {
  if (i == 0)
    return i;
  else
    return p - i;
}

// j-th element of the base-primes[b] Halton sequence with reversed digits.
double hal(const int b, int j) {
  const int p = primes[b];
  double h = 0.0, f = 1.0 / (double)p, fct = f;
  while (j > 0) {
    h += rev(j % p, p) * fct;
    j /= p;
    fct *= f;
  }
  return h;
}

// 3-vector: position, direction, also color (r,g,b).
struct Vec {
  double x, y, z;
  Vec(double x_ = 0, double y_ = 0, double z_ = 0) {
    x = x_;
    y = y_;
    z = z_;
  }
  inline Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); }
  inline Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); }
  inline Vec operator+(double b) const { return Vec(x + b, y + b, z + b); }
  inline Vec operator-(double b) const { return Vec(x - b, y - b, z - b); }
  inline Vec operator*(double b) const { return Vec(x * b, y * b, z * b); }
  inline Vec mul(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); }
  inline Vec norm() { return (*this) * (1.0 / sqrt(x * x + y * y + z * z)); }
  inline double dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; }
  Vec operator%(Vec& b) { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); }  // cross
};

#define MAX(x, y) ((x > y) ? x : y)

// axis aligned bounding box
struct AABB {
  Vec min, max;
  inline void fit(const Vec& p) {
    if (p.x < min.x) min.x = p.x;  // min
    if (p.y < min.y) min.y = p.y;  // min
    if (p.z < min.z) min.z = p.z;  // min
    max.x = MAX(p.x, max.x);
    max.y = MAX(p.y, max.y);
    max.z = MAX(p.z, max.z);
  }
  inline void reset() {
    min = Vec(1e20, 1e20, 1e20);
    max = Vec(-1e20, -1e20, -1e20);
  }
};

// Measurement point (hit point) of the eye pass.
struct HPoint {
  Vec f, pos, nrm, flux;
  double r2;
  unsigned int n;  // n = N / ALPHA in the paper
  int pix;
};

// Singly linked list node for hit points / hash-grid buckets.
struct List {
  HPoint* id;
  List* next;
};

List* ListAdd(HPoint* i, List* h) {
  List* p = new List;
  p->id = i;
  p->next = h;
  return p;
}

// Global photon-mapping state shared between the eye and photon passes.
unsigned int num_hash, pixel_index, num_photon;
double hash_s;
List** hash_grid;
List* hitpoints = NULL;
AABB hpbbox;

// spatial hash function
inline unsigned int hash(const int ix, const int iy, const int iz) {
  return (unsigned int)((ix * 73856093) ^ (iy * 19349663) ^ (iz * 83492791)) % num_hash;
}

void build_hash_grid(const int w, const int h) {
  // find the bounding box of all the measurement points
  hpbbox.reset();
  List* lst = hitpoints;
  while (lst != NULL) {
    HPoint* hp = lst->id;
    lst = lst->next;
    hpbbox.fit(hp->pos);
  }

  // heuristic for initial radius
  Vec ssize = hpbbox.max - hpbbox.min;
  double irad = ((ssize.x + ssize.y + ssize.z) / 3.0) / ((w + h) / 2.0) * 2.0;
  std::cout << irad << std::endl;

  // determine hash table size
  // we now find the bounding box of all the measurement points inflated by the initial radius
  hpbbox.reset();
  lst = hitpoints;
  int vphoton = 0;
  while (lst != NULL) {
    HPoint* hp = lst->id;
    lst = lst->next;
    hp->r2 = irad * irad;
    hp->n = 0;
    hp->flux = Vec();
    vphoton++;
    hpbbox.fit(hp->pos - irad);
    hpbbox.fit(hp->pos + irad);
  }

  // make each grid cell two times larger than the initial radius
  hash_s = 1.0 / (irad * 2.0);
  num_hash = vphoton;

  // build the hash table: every cell overlapped by a point's radius gets a
  // reference to that point.
  hash_grid = new List*[num_hash];
  for (unsigned int i = 0; i < num_hash; i++) hash_grid[i] = NULL;
  lst = hitpoints;
  while (lst != NULL) {
    HPoint* hp = lst->id;
    lst = lst->next;
    Vec BMin = ((hp->pos - irad) - hpbbox.min) * hash_s;
    Vec BMax = ((hp->pos + irad) - hpbbox.min) * hash_s;
    for (int iz = abs(int(BMin.z)); iz <= abs(int(BMax.z)); iz++) {
      for (int iy = abs(int(BMin.y)); iy <= abs(int(BMax.y)); iy++) {
        for (int ix = abs(int(BMin.x)); ix <= abs(int(BMax.x)); ix++) {
          int hv = hash(ix, iy, iz);
          hash_grid[hv] = ListAdd(hp, hash_grid[hv]);
        }
      }
    }
  }
}

struct Ray {
  Vec o, d;
  Ray(){};
  Ray(Vec o_, Vec d_) : o(o_), d(d_) {}
};

enum Refl_t { DIFF, SPEC, REFR };  // material types, used in radiance()

struct Sphere {
  double rad;
  Vec p, c;
  Refl_t refl;
  Sphere(double r_, Vec p_, Vec c_, Refl_t re_) : rad(r_), p(p_), c(c_), refl(re_) {}
  inline double intersect(const Ray& r) const {
    // ray-sphere intersection returns distance
    Vec op = p - r.o;
    double t, b = op.dot(r.d), det = b * b - op.dot(op) + rad * rad;
    if (det < 0) {
      return 1e20;
    } else {
      det = sqrt(det);
    }
    return (t = b - det) > 1e-4 ? t : ((t = b + det) > 1e-4 ? t : 1e20);
  }
};

Sphere sph[] = {  // Scene: radius, position, color, material
    Sphere(1e5, Vec(1e5 + 1, 40.8, 81.6), Vec(.75, .25, .25), DIFF),    // Left
    Sphere(1e5, Vec(-1e5 + 99, 40.8, 81.6), Vec(.25, .25, .75), DIFF),  // Right
    Sphere(1e5, Vec(50, 40.8, 1e5), Vec(.75, .75, .75), DIFF),          // Back
    Sphere(1e5, Vec(50, 40.8, -1e5 + 170), Vec(), DIFF),                // Front
    Sphere(1e5, Vec(50, 1e5, 81.6), Vec(.75, .75, .75), DIFF),          // Bottomm
    Sphere(1e5, Vec(50, -1e5 + 81.6, 81.6), Vec(.75, .75, .75), DIFF),  // Top
    Sphere(16.5, Vec(27, 16.5, 47), Vec(1, 1, 1) * .999, SPEC),         // Mirror
    Sphere(16.5, Vec(73, 16.5, 88), Vec(1, 1, 1) * .999, REFR),         // Glass
    Sphere(8.5, Vec(50, 8.5, 60), Vec(1, 1, 1) * .999, DIFF)};          // Middle

// tone mapping and gamma correction
int toInt(double x) { return int(pow(1 - exp(-x), 1 / 2.2) * 255 + .5); }

// find the closet interection
inline bool intersect(const Ray& r, double& t, int& id) {
  int n = sizeof(sph) / sizeof(Sphere);
  double d, inf = 1e20;
  t = inf;
  for (int i = 0; i < n; i++) {
    d = sph[i].intersect(r);
    if (d < t) {
      t = d;
      id = i;
    }
  }
  return t < inf;
}

// generate a photon ray from the point light source with QMC
void genp(Ray* pr, Vec* f, int i) {
  *f = Vec(2500, 2500, 2500) * (PI * 4.0);  // flux
  double p = 2. * PI * hal(0, i), t = 2. * acos(sqrt(1. - hal(1, i)));
  double st = sin(t);
  pr->d = Vec(cos(p) * st, cos(t), sin(p) * st);
  pr->o = Vec(50, 60, 85);
}

// Trace either an eye ray (m == true: store measurement points) or a photon
// ray (m == false: deposit flux at nearby measurement points).
void trace(const Ray& r, int dpt, bool m, const Vec& fl, const Vec& adj, int i) {
  double t;
  int id;

  dpt++;
  if (!intersect(r, t, id) || (dpt >= 20)) return;

  int d3 = dpt * 3;
  const Sphere& obj = sph[id];
  Vec x = r.o + r.d * t, n = (x - obj.p).norm(), f = obj.c;
  Vec nl = n.dot(r.d) < 0 ? n : n * -1;
  double p = f.x > f.y && f.x > f.z ? f.x : f.y > f.z ? f.y : f.z;

  if (obj.refl == DIFF) {
    // Lambertian
    // use QMC to sample the next direction
    double r1 = 2. * PI * hal(d3 - 1, i), r2 = hal(d3 + 0, i);
    double r2s = sqrt(r2);
    Vec w = nl, u = ((fabs(w.x) > .1 ? Vec(0, 1) : Vec(1)) % w).norm();
    Vec v = w % u, d = (u * cos(r1) * r2s + v * sin(r1) * r2s + w * sqrt(1 - r2)).norm();

    if (m) {
      // eye ray
      // store the measurment point
      HPoint* hp = new HPoint;
      hp->f = f.mul(adj);
      hp->pos = x;
      hp->nrm = n;
      hp->pix = pixel_index;
      hitpoints = ListAdd(hp, hitpoints);
    } else {
      // photon ray
      // find neighboring measurement points and accumulate flux via progressive density estimation
      Vec hh = (x - hpbbox.min) * hash_s;
      int ix = abs(int(hh.x)), iy = abs(int(hh.y)), iz = abs(int(hh.z));
      // strictly speaking, we should use #pragma omp critical here.
      // it usually works without an artifact due to the fact that photons are
      // rarely accumulated to the same measurement points at the same time (especially with QMC).
      // it is also significantly faster.
      {
        List* hp = hash_grid[hash(ix, iy, iz)];
        while (hp != NULL) {
          HPoint* hitpoint = hp->id;
          hp = hp->next;
          Vec v = hitpoint->pos - x;
          // check normals to be closer than 90 degree (avoids some edge brightning)
          if ((hitpoint->nrm.dot(n) > 1e-3) && (v.dot(v) <= hitpoint->r2)) {
            // unlike N in the paper, hitpoint->n stores "N / ALPHA" to make it an integer value
            double g = (hitpoint->n * ALPHA + ALPHA) / (hitpoint->n * ALPHA + 1.0);
            hitpoint->r2 = hitpoint->r2 * g;
            hitpoint->n++;
            hitpoint->flux = (hitpoint->flux + hitpoint->f.mul(fl) * (1. / PI)) * g;
          }
        }
      }
      // Russian roulette on the brightest channel.
      if (hal(d3 + 1, i) < p) trace(Ray(x, d), dpt, m, f.mul(fl) * (1. / p), adj, i);
    }
  } else if (obj.refl == SPEC) {
    // mirror
    trace(Ray(x, r.d - n * 2.0 * n.dot(r.d)), dpt, m, f.mul(fl), f.mul(adj), i);
  } else {
    // glass
    Ray lr(x, r.d - n * 2.0 * n.dot(r.d));
    bool into = (n.dot(nl) > 0.0);
    double nc = 1.0, nt = 1.5, nnt = into ? nc / nt : nt / nc, ddn = r.d.dot(nl), cos2t;

    // total internal reflection
    if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) return trace(lr, dpt, m, fl, adj, i);

    Vec td = (r.d * nnt - n * ((into ? 1 : -1) * (ddn * nnt + sqrt(cos2t)))).norm();
    // Schlick's Fresnel approximation.
    double a = nt - nc, b = nt + nc, R0 = a * a / (b * b), c = 1 - (into ? -ddn : td.dot(n));
    double Re = R0 + (1 - R0) * c * c * c * c * c, P = Re;
    Ray rr(x, td);
    Vec fa = f.mul(adj);
    if (m) {
      // eye ray (trace both rays)
      trace(lr, dpt, m, fl, fa * Re, i);
      trace(rr, dpt, m, fl, fa * (1.0 - Re), i);
    } else {
      // photon ray (pick one via Russian roulette)
      (hal(d3 - 1, i) < P) ? trace(lr, dpt, m, fl, fa, i) : trace(rr, dpt, m, fl, fa, i);
    }
  }
}

// Render the scene and write image.ppm. Fixes over the original: the
// function actually returns a value (it is declared int but fell off the
// end — undefined behavior), the output FILE* is closed, and the pixel
// buffer is released.
int render() {
  // samps * 1000 photon paths will be traced
  int w = 1024, h = 768, samps = 1000;

  // trace eye rays and store measurement points
  Ray cam(Vec(50, 48, 295.6), Vec(0, -0.042612, -1).norm());
  Vec cx = Vec(w * .5135 / h), cy = (cx % cam.d).norm() * .5135, *c = new Vec[w * h], vw;
  for (int y = 0; y < h; y++) {
    fprintf(stderr, "\rHitPointPass %5.2f%%", 100.0 * y / (h - 1));
    for (int x = 0; x < w; x++) {
      pixel_index = x + y * w;
      Vec d = cx * ((x + 0.5) / w - 0.5) + cy * (-(y + 0.5) / h + 0.5) + cam.d;
      trace(Ray(cam.o + d * 140, d.norm()), 0, true, Vec(), Vec(1, 1, 1), 0);
    }
  }
  fprintf(stderr, "\n");

  // build the hash table over the measurement points
  build_hash_grid(w, h);

  // trace photon rays with multi-threading
  num_photon = samps;
  vw = Vec(1, 1, 1);
#pragma omp parallel for schedule(dynamic, 1)
  for (int i = 0; i < static_cast<int>(num_photon); i++) {
    double p = 100. * (i + 1) / num_photon;
    fprintf(stderr, "\rPhotonPass %5.2f%%", p);
    int m = 1000 * i;
    Ray r;
    Vec f;
    for (int j = 0; j < 1000; j++) {
      genp(&r, &f, m + j);
      trace(r, 0, false, f, vw, m + j);
    }
  }

  // density estimation
  List* lst = hitpoints;
  while (lst != NULL) {
    HPoint* hp = lst->id;
    lst = lst->next;
    int i = hp->pix;
    c[i] = c[i] + hp->flux * (1.0 / (PI * hp->r2 * num_photon * 1000.0));
  }

  // save the image after tone mapping and gamma correction
  FILE* f = fopen("image.ppm", "w");
  fprintf(f, "P3\n%d %d\n%d\n", w, h, 255);
  for (int i = 0; i < w * h; i++) {
    fprintf(f, "%d %d %d ", toInt(c[i].x), toInt(c[i].y), toInt(c[i].z));
  }
  fclose(f);    // was leaked before
  delete[] c;   // was leaked before
  return 0;
}
}  // namespace smallppm
}  // namespace photonmap
gmres.h
/**
 *
 * @file gmres.h
 *
 * @copyright 2018 King Abdullah University of Science and Technology (KAUST).
 *                 All rights reserved.
 *
 * @author Mustafa Abduljabbar [mustafa.abduljabbar@kaust.edu.sa] and Mohammed Al Farhan [mohammed.farhan@kaust.edu.sa].
 *
 **/

#ifndef GMRES
#define GMRES

#include "utils.h"
#include "acoustics.h"
#include "self_metadata.h"

using namespace bemfmm;
using namespace bemfmm::utils;

// Compute a complex Givens rotation (c, s) for the pair (z1, z2), used to
// annihilate the subdiagonal entry of the Hessenberg matrix in GMRES.
// c is real; s is complex.  Handles the degenerate cases z1 == 0 and
// z1 == z2 == 0 (identity rotation).
void givens_loc(d_complex_t const& z1, d_complex_t const& z2, double& c, d_complex_t& s) {
  double abs_z1 = std::abs(z1);
  double abs_z2 = std::abs(z2);
  double vnormz = sqrt(abs_z1*abs_z1 + abs_z2*abs_z2);
  if(abs_z1!= 0) {
    c = abs_z1/vnormz;
    s = z2 / z1 * c;
  } else if(abs_z2 != 0) {
    c = 0.0;
    s = z2 / abs_z2;
  } else {
    // both entries zero: identity rotation
    c = 1.0;
    s = 0.0;
  }
}

// Global (MPI-reduced) 2-norm of a distributed complex vector: each rank sums
// conj(x_i)*x_i over its nd local entries, the partial sums are combined with
// MPI_Allreduce, and the real part of the complex sqrt is returned.
double scnrm22(int nd, d_complex_t_vec const& cy9) {
  d_complex_t result = 0.0;
  for (int i = 0; i < nd; ++i) {
    result += std::conj(cy9[i]) * cy9[i];
  }
  d_complex_t sum;
  MPI_Allreduce(&result, &sum, 1, MPI_DOUBLE_COMPLEX, MPI_SUM, MPI_COMM_WORLD);
  result = sqrt(sum);
  return std::real(result);
}

// Back-substitution: solve the n-by-n upper-triangular system whose entry
// (row i, col j) is stored as cma[j][i] (column-major vectors, as built for
// the Hessenberg matrix `ch` in my_gmres), writing the solution into cvy.
// NOTE: cvx is deliberately taken by value — my_gmres calls this with the
// same vector as both cvx and cvy, so a copy of the right-hand side is needed.
void cutrisub(int n, d_complex_t_vec_2d const& cma, d_complex_t_vec cvx, d_complex_t_vec& cvy) {
  for (int i = n-1; i >= 0; --i) {
    cvy[i] = cvx[i];
    for (int j = i+1; j < n; ++j) {
      cvy[i]-= cma[j][i] * cvy[j];
    }
    cvy[i] /= cma[i][i];
  }
}

// Apply the precomputed singular (self-interaction) corrections to the FMM
// matrix-vector product: entry i accumulates the nipp source values of its own
// patch, weighted by the dense self-interaction coefficients in `self`
// (consumed sequentially via selfIndex).
// NOTE(review): `patches` and `ntriangle` are unused here — presumably kept so
// the signature matches related helpers; confirm before removing.
template <typename VecArr>
void addSelfCorrections(VecArr& target, VecArr const& source, int partition_size, int32_vec const& patches, d_complex_t_vec const& self, int ntriangle, int nipp) {
  int selfIndex = 0;
  for (int i = 0; i < partition_size; ++i) {
    for (int j = 0; j < nipp; ++j) {
      target[i] += source[(i/nipp)*nipp + j]*self[selfIndex++];
    }
  }
}

#if USE_PART
// Build the nipp-per-point singular self-interaction coefficients (Duffy-like
// transformation over the 3 sub-triangles around the observation point, with
// nested nlqp x nlqp Gauss quadrature).  Geometry comes from flat coordinate
// arrays metadata.x/y/z (partitioned layout).  Output: self, sized
// partition_size*nipp.
void buildSingularityCorrections(self_metadata const& metadata, int partition_size, int32_vec const& patches, int16_vec const& pointlocs, int ntriangle, int nipp, d_complex_t_vec & self) {
  self.resize(partition_size*nipp, 0);
  double ppp,qqq,aa1[3]={0.0, 0.0, 0.0},aa2[3]={0.0, 0.0, 0.0},bb1[3]={0.0, 0.0, 0.0},vmmm[3]={0.0, 0.0, 0.0},valf[3]={0.0, 0.0, 0.0};
  double veuv1[2]={0.0, 0.0},veuv2[2]={0.0, 0.0};
  double vcsik[2]={0.0, 0.0};
  double vuo[3]={0.0, 0.0, 0.0},vvo[3]={0.0, 0.0, 0.0};
  d_complex_t I(0.,1.);
  // quadrature weights/abscissae and geometry pulled from the metadata bundle
  d_vector const& wwo = metadata.wwo;
  d_vector const& xxo = metadata.xxo;
  d_vector const& wws = metadata.wws;
  d_vector const& xxs = metadata.xxs;
  d_vector const& x = metadata.x;
  d_vector const& y = metadata.y;
  d_vector const& z = metadata.z;
  int const& nlqp = metadata.nlqp;
  double const* ipolymatrix = metadata.ipolymatrix;
  d_vector_2d const& vcsio = metadata.vcsio;
  d_complex_t const& cjvk = metadata.cjvk;
  double vrrr[3]={0.0, 0.0, 0.0};
  double vrr0[3]={0.0, 0.0, 0.0};
  d_ptr ruv1, ruv2;
#pragma omp parallel for private(vrrr, vrr0, ruv1, ruv2, ppp, qqq, aa1, aa2, bb1, vmmm, valf, veuv1, veuv2, vcsik, vuo, vvo)
  for (int i = 0; i < partition_size; ++i) {
    const int patch = i/nipp;
    const int index = patch*nipp;
    // for(int nn = 0; nn < 6; nn++) {
    //   std::cout << x[index+nn] << " , " << y[index+nn] << " , " << z[index+nn] << std::endl ;
    // }
    // second-order shape coefficients of the curved (6-node) patch, per axis
    aa1[0]=(x[index+0]+x[index+2]-2.0*x[index+5]);
    aa2[0]=(x[index+2]+x[index+3]-x[index+4]-x[index+5]);
    bb1[0]=(x[index+1]+x[index+2]-2.0*x[index+4]);
    aa1[1]=(y[index+0]+y[index+2]-2.0*y[index+5]);
    aa2[1]=(y[index+2]+y[index+3]-y[index+4]-y[index+5]);
    bb1[1]=(y[index+1]+y[index+2]-2.0*y[index+4]);
    aa1[2]=(z[index+0]+z[index+2]-2.0*z[index+5]);
    aa2[2]=(z[index+2]+z[index+3]-z[index+4]-z[index+5]);
    bb1[2]=(z[index+1]+z[index+2]-2.0*z[index+4]);
    double polyvector[nipp], rrr, rr0;
    // only the interpolation point this partition entry refers to is needed
    int k = pointlocs[i];
    //for (int k = 0; k < nipp; ++k)
    {
      utils::getUV_vector(vcsio[k], x, y, z, patch, nipp, vuo, vvo);
      for (int l = 0; l < nipp; ++l) {
        d_complex_t z00 = 0.0;
        // split the reference triangle into 3 sub-triangles around point k
        for (int d = 0; d < 3; ++d) {
          switch(d) {
            case 0: ruv1 = utils::vos2; ruv2 = utils::vos3; break;
            case 1: ruv1 = utils::vos3; ruv2 = utils::vos1; break;
            case 2: ruv1 = utils::vos1; ruv2 = utils::vos2; break;
          }
          veuv1[0] = vcsio[k][0]- ruv2[0];
          veuv1[1] = vcsio[k][1]- ruv2[1];
          veuv2[0] = ruv1[0] - ruv2[0];
          veuv2[1] = ruv1[1] - ruv2[1];
          double jacob_star = std::abs(veuv1[0]*veuv2[1]-veuv1[1]*veuv2[0]);
          for (int kk = 0; kk < nlqp; ++kk) {
            ppp = ruv1[0]-vcsio[k][0]+(ruv2[0]-ruv1[0])*xxo[kk];
            qqq = ruv1[1]-vcsio[k][1]+(ruv2[1]-ruv1[1])*xxo[kk];
            for (int d = 0; d < 3; ++d) {
              vmmm[d] = 2.0 * aa1[d] * ppp * ppp + 2.0 * bb1[d] * qqq * qqq + 4.0 * aa2[d] * ppp * qqq;
              valf[d] = vuo[d] * ppp + vvo[d] * qqq;
            }
            for (int mm = 0; mm < nlqp; ++mm) {
              vcsik[0]=(ruv1[0]-vcsio[k][0])*xxs[mm]+(ruv2[0]-ruv1[0])*xxo[kk]*xxs[mm]+vcsio[k][0];
              vcsik[1]=(ruv1[1]-vcsio[k][1])*xxs[mm]+(ruv2[1]-ruv1[1])*xxo[kk]*xxs[mm]+vcsio[k][1];
              for (int d = 0; d < 3; ++d) {
                vrr0[d]=-xxs[mm] * vmmm[d] - valf[d];
                vrrr[d]= xxs[mm] * vrr0[d];
              }
              utils::rootDotProduct(vrrr,vrrr,rrr);
              utils::rootDotProduct(vrr0,vrr0,rr0);
              double ipolator = 0.0;
              // NOTE(review): only nipp == 6 is supported; other orders
              // silently contribute nothing — confirm this is intended.
              if(nipp == 6) {
                polyvector[0] = 1.0;
                polyvector[1] = vcsik[0];
                polyvector[2] = vcsik[1];
                polyvector[3] = vcsik[0] * vcsik[1];
                polyvector[4] = vcsik[0] * vcsik[0];
                polyvector[5] = vcsik[1] * vcsik[1];
                for (int kkk = 0; kkk < 6; ++kkk) {
                  ipolator+=ipolymatrix[kkk*nipp+l]*polyvector[kkk];
                }
                // Green's function kernel exp(I*cjvk*r)/(4*pi*r0), weighted
                z00 += wwo[kk]*wws[mm]*exp(I*cjvk*rrr)/(4.0*M_PI*rr0)*ipolator*jacob_star;
              }
            }
          }
        }
        self[(i*nipp) + l] += z00;
      }
    }
  }
}
#else
// Same computation as above, but the geometry is addressed through the
// element connectivity (nsupan) and global node coordinates (sunod) instead
// of partitioned flat coordinate arrays.
void buildSingularityCorrections(self_metadata const& metadata, int partition_size, int32_vec const& patches, int16_vec const& pointlocs, int ntriangle, int nipp, d_complex_t_vec & self) {
  self.resize(partition_size*nipp, 0);
  double ppp,qqq,aa1[3]={0.0, 0.0, 0.0},aa2[3]={0.0, 0.0, 0.0},bb1[3]={0.0, 0.0, 0.0},vmmm[3]={0.0, 0.0, 0.0},valf[3]={0.0, 0.0, 0.0};
  double veuv1[2]={0.0, 0.0},veuv2[2]={0.0, 0.0};
  double vcsik[2]={0.0, 0.0};
  double vuo[3]={0.0, 0.0, 0.0},vvo[3]={0.0, 0.0, 0.0};
  d_complex_t I(0.,1.);
  d_vector const& wwo = metadata.wwo;
  d_vector const& xxo = metadata.xxo;
  d_vector const& wws = metadata.wws;
  d_vector const& xxs = metadata.xxs;
  int const& nlqp = metadata.nlqp;
  double const* ipolymatrix = metadata.ipolymatrix;
  int** const& nsupan = metadata.nsupan;
  double** const& sunod = metadata.sunod;
  d_vector_2d const& vcsio = metadata.vcsio;
  d_complex_t const& cjvk = metadata.cjvk;
  double vrrr[3]={0.0, 0.0, 0.0};
  double vrr0[3]={0.0, 0.0, 0.0};
  d_ptr ruv1, ruv2;
#pragma omp parallel for private(vrrr, vrr0, ruv1, ruv2, ppp, qqq, aa1, aa2, bb1, vmmm, valf, veuv1, veuv2, vcsik, vuo, vvo)
  for (int i = 0; i < partition_size; ++i) {
    int* nodeo_sn = nsupan[patches[i]];
    double polyvector[nipp], rrr, rr0;
    // second-order shape coefficients of the curved (6-node) patch, per axis
    for (int d = 0; d < 3; ++d) {
      aa1[d]=(sunod[d][nodeo_sn[0]]+sunod[d][nodeo_sn[2]]-2.0*sunod[d][nodeo_sn[5]]);
      aa2[d]=(sunod[d][nodeo_sn[2]]+sunod[d][nodeo_sn[3]]-sunod[d][nodeo_sn[4]]-sunod[d][nodeo_sn[5]]);
      bb1[d]=(sunod[d][nodeo_sn[1]]+sunod[d][nodeo_sn[2]]-2.0*sunod[d][nodeo_sn[4]]);
    }
    int k = pointlocs[i];
    //for (int k = 0; k < nipp; ++k)
    {
      utils::getUV_vector(vcsio[k], nodeo_sn, sunod, nipp, vuo, vvo);
      for (int l = 0; l < nipp; ++l) {
        d_complex_t z00 = 0.0;
        for (int d = 0; d < 3; ++d) {
          switch(d) {
            case 0: ruv1 = utils::vos2; ruv2 = utils::vos3; break;
            case 1: ruv1 = utils::vos3; ruv2 = utils::vos1; break;
            case 2: ruv1 = utils::vos1; ruv2 = utils::vos2; break;
          }
          veuv1[0] = vcsio[k][0]- ruv2[0];
          veuv1[1] = vcsio[k][1]- ruv2[1];
          veuv2[0] = ruv1[0] - ruv2[0];
          veuv2[1] = ruv1[1] - ruv2[1];
          double jacob_star = std::abs(veuv1[0]*veuv2[1]-veuv1[1]*veuv2[0]);
          for (int kk = 0; kk < nlqp; ++kk) {
            ppp = ruv1[0]-vcsio[k][0]+(ruv2[0]-ruv1[0])*xxo[kk];
            qqq = ruv1[1]-vcsio[k][1]+(ruv2[1]-ruv1[1])*xxo[kk];
            for (int d = 0; d < 3; ++d) {
              vmmm[d] = 2.0 * aa1[d] * ppp * ppp + 2.0 * bb1[d] * qqq * qqq + 4.0 * aa2[d] * ppp * qqq;
              valf[d] = vuo[d] * ppp + vvo[d] * qqq;
            }
            for (int mm = 0; mm < nlqp; ++mm) {
              vcsik[0]=(ruv1[0]-vcsio[k][0])*xxs[mm]+(ruv2[0]-ruv1[0])*xxo[kk]*xxs[mm]+vcsio[k][0];
              vcsik[1]=(ruv1[1]-vcsio[k][1])*xxs[mm]+(ruv2[1]-ruv1[1])*xxo[kk]*xxs[mm]+vcsio[k][1];
              for (int d = 0; d < 3; ++d) {
                vrr0[d]=-xxs[mm] * vmmm[d] - valf[d];
                vrrr[d]= xxs[mm] * vrr0[d];
              }
              utils::rootDotProduct(vrrr,vrrr,rrr);
              utils::rootDotProduct(vrr0,vrr0,rr0);
              double ipolator = 0.0;
              // NOTE(review): only nipp == 6 is supported, as above.
              if(nipp == 6) {
                polyvector[0] = 1.0;
                polyvector[1] = vcsik[0];
                polyvector[2] = vcsik[1];
                polyvector[3] = vcsik[0] * vcsik[1];
                polyvector[4] = vcsik[0] * vcsik[0];
                polyvector[5] = vcsik[1] * vcsik[1];
                for (int kkk = 0; kkk < 6; ++kkk) {
                  ipolator+=ipolymatrix[kkk*nipp+l]*polyvector[kkk];
                }
                z00 += wwo[kk]*wws[mm]*exp(I*cjvk*rrr)/(4.0*M_PI*rr0)*ipolator*jacob_star;
              }
            }
          }
        }
        self[(i*nipp) + l] += z00;
      }
    }
  }
}
#endif

// Restarted GMRES(m) driver for the FMM-accelerated BEM system.
//   crhs : right-hand side (local partition), rj : solution vector (aliased
//   as cx, updated in place), weights : FMM source weights, precis : target
//   relative residual, gmresRestart : restart length m.
// The operator application is exafmm::FMM_MatVec plus addSelfCorrections;
// inner products and norms are MPI-reduced, so all ranks call this together.
// Always returns 0.
int my_gmres(int partition_size, int ntriangle, int nipp, int nitermax, double precis, d_complex_t_vec& crhs, d_complex_t_vec& rj, d_complex_t_vec& weights, self_metadata const& metadata, int fmm_verbose, bool verify, int gmresRestart, int32_vec const& patches, int16_vec const& pointlocs, d_complex_t_vec const& zzsparse,int mpirank) {
  const int m = gmresRestart;
  const int nd = partition_size;
  //const int sample_direct = 300;
  const int sample_direct = 500;
  int itertotal, iterout, n, k;
  d_complex_t_vec& cx = rj;                              // solution alias (updated in place)
  d_complex_t_vec cb(m+1,0.0);                           // rotated residual vector
  d_complex_t_vec chr(m+1,0.0);                          // Allreduce buffer for Hessenberg column
  d_complex_t_vec self;                                  // singular self-interaction coefficients
  d_complex_t_vec_2d cy(m+1, d_complex_t_vec(nd,0.0));   // Krylov basis; cy[m] is scratch
  d_complex_t_vec_2d ch(m, d_complex_t_vec(m+1,0.0));    // Hessenberg matrix, column-major
  d_complex_t_vec_2d cy_t(nd,d_complex_t_vec(m,0.0));    // transposed basis for the update
  d_complex_t_vec test(nd ,0);
  std::vector<int> sample_addresses(sample_direct ,0);
  buildSingularityCorrections(metadata, nd, patches, pointlocs, ntriangle, nipp, self);
  bool init = true;
  d_complex_t_vec cs(m,0.0);                             // Givens sines
  d_vector rc(m,0.0);                                    // Givens cosines
  d_complex_t ctemp = 0.0;
  double ay0, be, bea;
  alogger::startTimer("GMRES Time");
  ay0 = scnrm22(nd, crhs);                               // ||b|| for relative residuals
  itertotal = 0;
  for (iterout = 0; iterout < nitermax; ++iterout) {
    itertotal++;
    // residual r = b - A*x (skipped on the very first pass, where x = 0
    // and cy[m] is still zero)
    if(!init) {
      alogger::startTimer("FMM Time");
      exafmm::FMM_MatVec(cy[m], cx, weights, fmm_verbose);
      addSelfCorrections(cy[m], cx, nd, patches, self, ntriangle, nipp);
      alogger::stopResetTimer("FMM Time");
    } else init = false;
    for (int i = 0; i < nd; ++i) cy[m][i] = crhs[i] - cy[m][i];
    cb[0] = scnrm22(nd,cy[m]);
    be = std::real(cb[0]/ay0);
    if(alogger::verbose) {
      std::cout << std::setw(alogger::stringLength) << std::fixed << std::left << "Iter Outer" << " : " << iterout << std::endl;
      std::cout << std::setw(alogger::stringLength) << std::fixed << std::left << "Iter Total" << " : " << itertotal << std::endl;
      std::cout << std::setw(alogger::stringLength) << std::fixed << std::left << "GMRES Residual Norm" << " : " << std::setprecision(9) << std::scientific<< be <<std::endl;
      std::cout << "==========================================================" << std::fixed << std::left <<std::endl;
    }
    if(be < precis || itertotal > nitermax) break;
    // v0 = r / ||r||
    for (int i = 0; i < nd; ++i) cy[0][i] = cy[m][i]/cb[0];
    // inner Arnoldi iteration (restart length m)
    for (n = 0; n < m; ++n) {
      alogger::startTimer("Iter Time");
      itertotal++;
      alogger::startTimer("FMM Time");
      exafmm::FMM_MatVec(cy[n+1], cy[n], weights, fmm_verbose);
      if(verify) {
        // spot-check the FMM mat-vec against direct summation on a sample
        alogger::stopTimer("Solving AX=B",0);
        exafmm::DirectSample(sample_direct, test, sample_addresses);
        alogger::logNumericalError(test, cy[n+1], sample_addresses, sample_direct ,"FMM vs. Direct");
        alogger::startTimer("Solving AX=B");
      }
      addSelfCorrections(cy[n+1], cy[n], partition_size, patches, self, ntriangle, nipp);
      alogger::stopResetTimer("FMM Time");
      // modified Gram-Schmidt: local dot products, then one Allreduce
      for (k = 0; k <= n; ++k) {
        dot_product(cy[k],cy[n+1],ch[n][k],nd);
      }
      MPI_Allreduce((d_complex_t_ptr) &(ch[n][0]), (d_complex_t_ptr) &(chr[0]), n+1, MPI_DOUBLE_COMPLEX, MPI_SUM, MPI_COMM_WORLD);
      for (k = 0; k <= n; ++k) {
        ch[n][k] = chr[k];
        for (int i = 0; i < nd; ++i) cy[n+1][i]-=cy[k][i]*ch[n][k];
      }
      ch[n][n+1] = scnrm22(nd,cy[n+1]);
      // NOTE(review): n < m always holds inside this loop, so the guard is a
      // no-op; confirm whether a different bound was intended.
      if(n < m) for (int i = 0; i < nd; ++i) cy[n+1][i]/=ch[n][n+1];
      // apply the previously computed Givens rotations to the new column
      if(n != 0) {
        for (int k = 0; k <= n-1; ++k) {
          ctemp = rc[k]*ch[n][k+1] - cs[k]*ch[n][k];
          ch[n][k] = rc[k]*ch[n][k] + std::conj(cs[k]) * ch[n][k+1];
          ch[n][k+1] = ctemp;
        }
      }
      // new rotation annihilating the subdiagonal entry ch[n][n+1]
      givens_loc(ch[n][n],ch[n][n+1],rc[n],cs[n]);
      cb[n+1] = -cs[n]*cb[n];
      cb[n] = rc[n]*cb[n];
      ch[n][n] = rc[n]*ch[n][n]+ std::conj(cs[n])*ch[n][n+1];
      ch[n][n+1] = 0;
      bea = std::abs(cb[n+1])/ay0;   // current relative residual estimate
      if(alogger::verbose) {
        std::cout << std::setw(alogger::stringLength) << std::fixed << std::left << "Iteration" << " : " << itertotal << std::endl;
        std::cout << std::setw(alogger::stringLength) << std::fixed << std::left << "GMRES Residual Norm" << " : " << std::setprecision(9) << std::scientific<< bea <<std::endl;
        std::cout << "==========================================================" << std::fixed << std::left <<std::endl;
      }
      // converged or restart reached: solve the small triangular system and
      // update x with the Krylov combination
      if(n == m-1 || bea < precis) {
        cutrisub(n+1, ch, cb, cb);
        transpose(cy,m, nd, cy_t);
        matVecMul(cy_t,cb,nd,n+1,cy[m]);
        for (int i = 0; i < nd; ++i) cx[i] = cx[i] + cy[m][i];
        break;
      }
      alogger::stopResetTimer("Iter Time");
    }
    // NOTE(review): this stops the "GMRES Time" timer at the end of every
    // outer iteration although it was started once before the loop — looks
    // like it belongs after the loop; confirm alogger's timer semantics.
    alogger::stopTimer("GMRES Time");
  }
  return 0;
}
#endif
GB_binop__eq_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_08__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_02__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_04__eq_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_int64) // A*D function (colscale): GB (_AxD__eq_int64) // D*A function (rowscale): GB (_DxB__eq_int64) // C+=B function (dense accum): GB (_Cdense_accumB__eq_int64) // C+=b function (dense accum): GB (_Cdense_accumb__eq_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_int64) // C=scalar+B GB (_bind1st__eq_int64) // C=scalar+B' GB (_bind1st_tran__eq_int64) // C=A+scalar GB (_bind2nd__eq_int64) // C=A'+scalar GB (_bind2nd_tran__eq_int64) // C type: bool // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (compile-time opt-out controlled by GB_control.h)
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_INT64 || GxB_NO_EQ_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// EQ is not in that list, so this kernel is excluded for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled: EQ is not a valid accum operator for this kernel
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled: EQ is not a valid accum operator for this kernel
    #if 0
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true for full)
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (GBB is true for full)
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

GrB_Info GB (_bind1st_tran__eq_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

GrB_Info GB (_bind2nd_tran__eq_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
RITmain.h
#ifndef RITmain #define RITmain #include<vector> #include<algorithm> #include<random> #include<set> #include<Rcpp.h> #include<chrono> #ifdef _OPENMP #include <omp.h> #endif #include<math.h> #include "RaggedArray.h" #include "RITaux.h" using namespace std; // [[Rcpp::plugins(cpp11)]] // [[Rcpp::plugins(openmp)]] set<vector<int> > RIT_basic(RaggedArray &x, NumericVector &weights, const int L, const double branch, const int depth, const int n_trees, unsigned const int min_inter_sz, const int n_cores, const int n) { // Set up parameters const int fl_branch=floor(branch); const int cl_branch=ceil(branch); const double branch_diff=branch-fl_branch; int i1, i2, i; // Set up vector of seeds for RNG vector<unsigned int> seeds(n_cores); for (int i=0; i<n_cores; i++) { seeds[i] = chrono::high_resolution_clock::now().time_since_epoch().count()*(i+1); } // Set up output objects set<vector<int> > total_candidate_interactions; //union of candidate interactions for all trees const int depthFinal = depth - 2; #ifdef _OPENMP omp_set_num_threads(n_cores); #endif #pragma omp parallel { // Set up RNG for each thread #ifdef _OPENMP mt19937_64 mt(seeds[omp_get_thread_num()]); //Use Mersenne Twister as RNG #else mt19937_64 mt(seeds[0]); //Use Mersenne Twister as RNG #endif discrete_distribution<int> r_obs(weights.begin(), weights.end()); uniform_real_distribution<> r_unif(0,1); //use for random number of branches #pragma omp for schedule(static) nowait for (int tree = 0; tree < n_trees; tree++) { set<vector<int> > candidate_interactions; //set of candidate interactions for each tree vector<int> root; // first intersection computed by walking along arrays as sets will be of // similar size i1 = r_obs(mt); i2 = r_obs(mt); set_intersection(x.begin(i1), x.end(i1), x.begin(i2), x.end(i2), back_inserter(root)); if (root.size() >= min_inter_sz) { // interactions must have size at least min_inter_sz if ((root.size() > min_inter_sz) && (depth > 2)) { // depth >= 3 // Only run this code when the 
initial intersection produces an interaction of size greater than min_inter_sz // initialise parents vector<RaggedArray> parents(depthFinal); parents[0].push_back(root.begin(), root.end()); for (int depth = 1; depth <= depthFinal; depth++) { for (int node = 0; node < parents[depth-1].nrow(); node++) { int cur_branch; //if(floor(branch)==branch){cur_branch=branch;} //if branch is an integer if (r_unif(mt) < branch_diff) { cur_branch=cl_branch; } //if random number in (0,1) is less than decimal part of branch else { cur_branch=fl_branch; } //if random number in (0,1) is greater than decimal part of branch for (int k = 0; k < cur_branch; k++) { i = r_obs(mt); vector<int> temp_interaction = binary_intersect(x.begin(i), x.end(i),parents[depth-1].begin(node), parents[depth-1].end(node)); if (temp_interaction.size() >= min_inter_sz) { if ((depth == depthFinal) || (temp_interaction.size() == min_inter_sz)) { candidate_interactions.insert(temp_interaction); } else { parents[depth].push_back(temp_interaction.begin(), temp_interaction.end()); } } } } } } else { candidate_interactions.insert(root); } } #pragma omp critical(update_total_candidate_interactions) { total_candidate_interactions.insert(candidate_interactions.begin(), candidate_interactions.end()); } } } return total_candidate_interactions; } // [[Rcpp::plugins(cpp11)]] // [[Rcpp::plugins(openmp)]] set<vector<int> > RIT_minhash(RaggedArray &x, const int L, const double branch, const int depth, const int n_trees, const double theta0, const double theta1, unsigned const int min_inter_sz, const int n_cores, const int n, int** H0t, const double n0_plus_1_over_n0, const double recip_n0_plus_1) { // Set up parameters const int fl_branch=floor(branch); const int cl_branch=ceil(branch); const double branch_diff=branch-fl_branch; // Set up vector of seeds for RNG vector<unsigned int> seeds(n_cores); for (int i=0; i<n_cores; i++) { seeds[i] = chrono::high_resolution_clock::now().time_since_epoch().count()*(i+1); } // Set up 
output objects set<vector<int> > total_candidate_interactions; //union of candidate interactions for all trees const int depthFinal = depth - 2; #ifdef _OPENMP omp_set_num_threads(n_cores); #endif #pragma omp parallel { // Set up RNG for each thread #ifdef _OPENMP mt19937_64 mt(seeds[omp_get_thread_num()]); //Use Mersenne Twister as RNG #else mt19937_64 mt(seeds[0]); //Use Mersenne Twister as RNG #endif uniform_int_distribution<int> r_obs(0,n-1); uniform_real_distribution<> r_unif(0,1); //use for random number of branches #pragma omp for schedule(static) nowait for (int tree = 0; tree < n_trees; tree++) { set<vector<int> > candidate_interactions; //set of candidate interactions from each tree vector<int> root; // first intersection computed by walking along arrays as sets will be of similar size int i1, i2; i1 = r_obs(mt); i2 = r_obs(mt); set_intersection(x.begin(i1), x.end(i1), x.begin(i2), x.end(i2), back_inserter(root)); if ((root.size() >= min_inter_sz) && (PrevEst(root, H0t, L, n0_plus_1_over_n0, recip_n0_plus_1) < theta0)) { // Class 0 prevalence must be low // interactions must have size at least min_inter_sz if ((root.size() > min_inter_sz) && (depth > 2)) { // depth >= 3 // Only run this code when the initial intersection produces an interaction of size greater than min_inter_sz // initialise parents vector<RaggedArray> parents(depthFinal); parents[0].push_back(root.begin(), root.end()); for (int depth = 1; depth <= depthFinal; depth++) { for (int node = 0; node < parents[depth-1].nrow(); node++) { int cur_branch; if (r_unif(mt) < branch_diff) { cur_branch=cl_branch; } //if random number in (0,1) is less than decimal part of branch else { cur_branch=fl_branch; } //if random number in (0,1) is greater than decimal part of branch for (int k = 0; k < cur_branch; k++) { int i = r_obs(mt); vector<int> temp_interaction = binary_intersect(x.begin(i), x.end(i),parents[depth-1].begin(node), parents[depth-1].end(node)); if ((temp_interaction.size() >= min_inter_sz) 
&& (PrevEst(temp_interaction, H0t, L, n0_plus_1_over_n0, recip_n0_plus_1)< theta0)) { if ((depth == depthFinal) || (temp_interaction.size() == min_inter_sz)) { candidate_interactions.insert(temp_interaction); } else { parents[depth].push_back(temp_interaction.begin(), temp_interaction.end()); } } } } } } else { candidate_interactions.insert(root); } } #pragma omp critical(update_total_candidate_interactions) { total_candidate_interactions.insert(candidate_interactions.begin(), candidate_interactions.end()); } } } return total_candidate_interactions; } #endif
requantize_leakyrelu_pack8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Requantize int32 data back to int8 with a fused leaky-ReLU, for pack8 layout
// (8 lanes per element, handled as two float32x4 register halves) using NEON.
//
// Instead of a dequantize -> leakyrelu -> requantize pipeline, the two scales
// are folded into one multiplier per channel (see the identity comments in the
// body), so each element needs only a multiply (or fused multiply-add when a
// bias is present) before the int8 conversion performed by
// float2int8leakyrelu() / the inline asm.
//
// Parameters:
//   bottom_blob    - input blob of int32 values (pack8)
//   top_blob       - output blob receiving signed int8 values (pack8);
//                    assumed to be allocated by the caller - TODO confirm
//   scale_in_data  - dequantize scale; w == 1 means one per-tensor value,
//                    otherwise 8 per-lane values per channel
//   scale_out_data - requantize scale; same 1-vs-per-channel convention
//   bias_data      - optional bias; w == 0 selects the no-bias path
//   slope          - leaky-ReLU slope applied to negative values
//   opt            - ncnn options (num_threads drives the OpenMP loop)
static void requantize_leakyrelu_pack8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& scale_in_data, const Mat& scale_out_data, const Mat& bias_data, float slope, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;
    int size = w * h;
    int scale_in_data_size = scale_in_data.w;
    int scale_out_data_size = scale_out_data.w;
    int bias_data_size = bias_data.w;

    // Scale-folding identities this kernel relies on:
    // int8(leakyrelu(v * scale_in, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out), slope)

    // int8(leakyrelu(v * scale_in + bias, slope) * scale_out)
    // int8_leakyrelu(v * (scale_in * scale_out) + (bias * scale_out), slope)

    if (bias_data_size == 0)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Broadcast the single per-tensor scale, or load the 8 per-lane
            // values of channel q as two float32x4 halves.
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);

            // Fold the two scales into a single multiplier (see identities above).
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            // Main loop: 4 pack8 elements (32 int32 values) per iteration.
            // Each (_v2k, _v2k+1) pair is the low/high half of one pack8 element.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                _v4 = vmulq_f32(_v4, _scale0);
                _v5 = vmulq_f32(_v5, _scale1);
                _v6 = vmulq_f32(_v6, _scale0);
                _v7 = vmulq_f32(_v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2 pack8 elements (16 values) per iteration.
            for (; i + 1 < size; i += 2)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                _v2 = vmulq_f32(_v2, _scale0);
                _v3 = vmulq_f32(_v3, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
            }
            // Tail: one pack8 element (8 values) at a time.
            for (; i < size; i++)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmulq_f32(_v0, _scale0);
                _v1 = vmulq_f32(_v1, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
            }
        }
    }
    else
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            const int* intptr = bottom_blob.channel(q);
            signed char* ptr = top_blob.channel(q);

            // Broadcast the single per-tensor value, or load the 8 per-lane
            // values of channel q as two float32x4 halves.
            float32x4_t _scale_in0 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8);
            float32x4_t _scale_in1 = scale_in_data_size == 1 ? vdupq_n_f32(scale_in_data[0]) : vld1q_f32((const float*)scale_in_data + q * 8 + 4);
            float32x4_t _scale_out0 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8);
            float32x4_t _scale_out1 = scale_out_data_size == 1 ? vdupq_n_f32(scale_out_data[0]) : vld1q_f32((const float*)scale_out_data + q * 8 + 4);
            float32x4_t _bias0 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8);
            float32x4_t _bias1 = bias_data_size == 1 ? vdupq_n_f32(bias_data[0]) : vld1q_f32((const float*)bias_data + q * 8 + 4);

            // Fold the scales and pre-scale the bias by scale_out (see
            // identities above), so the inner loop is one fma per register.
            float32x4_t _scale0 = vmulq_f32(_scale_in0, _scale_out0);
            float32x4_t _scale1 = vmulq_f32(_scale_in1, _scale_out1);
            _bias0 = vmulq_f32(_bias0, _scale_out0);
            _bias1 = vmulq_f32(_bias1, _scale_out1);
            float32x4_t _slope = vdupq_n_f32(slope);

            int i = 0;
#if __aarch64__
            // Main loop: 4 pack8 elements (32 int32 values) per iteration.
            for (; i + 3 < size; i += 4)
            {
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                float32x4_t _v4 = vcvtq_f32_s32(vld1q_s32(intptr + 16));
                float32x4_t _v5 = vcvtq_f32_s32(vld1q_s32(intptr + 20));
                float32x4_t _v6 = vcvtq_f32_s32(vld1q_s32(intptr + 24));
                float32x4_t _v7 = vcvtq_f32_s32(vld1q_s32(intptr + 28));
                // v * (scale_in * scale_out) + (bias * scale_out), fused.
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                _v4 = vfmaq_f32(_bias0, _v4, _scale0);
                _v5 = vfmaq_f32(_bias1, _v5, _scale1);
                _v6 = vfmaq_f32(_bias0, _v6, _scale0);
                _v7 = vfmaq_f32(_bias1, _v7, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));
                vst1_s8(ptr + 16, float2int8leakyrelu(_v4, _v5, _slope));
                vst1_s8(ptr + 24, float2int8leakyrelu(_v6, _v7, _slope));

                intptr += 32;
                ptr += 32;
            }
#endif // __aarch64__
            // 2 pack8 elements (16 values) per iteration.
            for (; i + 1 < size; i += 2)
            {
#if __aarch64__
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                float32x4_t _v2 = vcvtq_f32_s32(vld1q_s32(intptr + 8));
                float32x4_t _v3 = vcvtq_f32_s32(vld1q_s32(intptr + 12));
                _v0 = vfmaq_f32(_bias0, _v0, _scale0);
                _v1 = vfmaq_f32(_bias1, _v1, _scale1);
                _v2 = vfmaq_f32(_bias0, _v2, _scale0);
                _v3 = vfmaq_f32(_bias1, _v3, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));
                vst1_s8(ptr + 8, float2int8leakyrelu(_v2, _v3, _slope));

                intptr += 16;
                ptr += 16;
#else  // __aarch64__
                // ARMv7 path: load 16 int32, convert to f32, fma with the folded
                // scale/bias, also compute the slope-multiplied copy, round both
                // (vcvtr uses the current FPSCR rounding mode), saturating-narrow
                // s32 -> s16 -> s8, then take the elementwise max of the plain and
                // slope-scaled results - this implements leaky ReLU assuming
                // 0 <= slope <= 1 (NOTE(review): confirm slope range).
                asm volatile(
                    "pld        [%0, #512]      \n"
                    "vldm       %0!, {d8-d15}   \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vmov       q2, %q6         \n"
                    "vmov       q3, %q7         \n"
                    "vcvt.f32.s32 q4, q4        \n"
                    "vcvt.f32.s32 q5, q5        \n"
                    "vcvt.f32.s32 q6, q6        \n"
                    "vcvt.f32.s32 q7, q7        \n"
                    "vmla.f32   q0, q4, %q4     \n"
                    "vmla.f32   q1, q5, %q5     \n"
                    "vmla.f32   q2, q6, %q4     \n"
                    "vmla.f32   q3, q7, %q5     \n"
                    "vmul.f32   q4, q0, %q8     \n"
                    "vmul.f32   q5, q1, %q8     \n"
                    "vmul.f32   q6, q2, %q8     \n"
                    "vmul.f32   q7, q3, %q8     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vcvtr.s32.f32 s8, s8       \n"
                    "vcvtr.s32.f32 s9, s9       \n"
                    "vcvtr.s32.f32 s10, s10     \n"
                    "vcvtr.s32.f32 s11, s11     \n"
                    "vcvtr.s32.f32 s12, s12     \n"
                    "vcvtr.s32.f32 s13, s13     \n"
                    "vcvtr.s32.f32 s14, s14     \n"
                    "vcvtr.s32.f32 s15, s15     \n"
                    "vcvtr.s32.f32 s16, s16     \n"
                    "vcvtr.s32.f32 s17, s17     \n"
                    "vcvtr.s32.f32 s18, s18     \n"
                    "vcvtr.s32.f32 s19, s19     \n"
                    "vcvtr.s32.f32 s20, s20     \n"
                    "vcvtr.s32.f32 s21, s21     \n"
                    "vcvtr.s32.f32 s22, s22     \n"
                    "vcvtr.s32.f32 s23, s23     \n"
                    "vcvtr.s32.f32 s24, s24     \n"
                    "vcvtr.s32.f32 s25, s25     \n"
                    "vcvtr.s32.f32 s26, s26     \n"
                    "vcvtr.s32.f32 s27, s27     \n"
                    "vcvtr.s32.f32 s28, s28     \n"
                    "vcvtr.s32.f32 s29, s29     \n"
                    "vcvtr.s32.f32 s30, s30     \n"
                    "vcvtr.s32.f32 s31, s31     \n"
                    "vqmovn.s32 d0, q0          \n"
                    "vqmovn.s32 d1, q1          \n"
                    "vqmovn.s32 d4, q2          \n"
                    "vqmovn.s32 d5, q3          \n"
                    "vqmovn.s32 d8, q4          \n"
                    "vqmovn.s32 d9, q5          \n"
                    "vqmovn.s32 d12, q6         \n"
                    "vqmovn.s32 d13, q7         \n"
                    "vqmovn.s16 d0, q0          \n"
                    "vqmovn.s16 d1, q2          \n"
                    "vqmovn.s16 d8, q4          \n"
                    "vqmovn.s16 d9, q6          \n"
                    "vmax.s8    q0, q0, q4      \n"
                    "vst1.s8    {d0-d1}, [%1 :128]! \n"
                    : "=r"(intptr), // %0
                    "=r"(ptr)       // %1
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1),  // %7
                    "w"(_slope)   // %8
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
#endif // __aarch64__
            }
            // Tail: one pack8 element (8 values) at a time.
            for (; i < size; i++)
            {
#if __aarch64__
                float32x4_t _v0 = vcvtq_f32_s32(vld1q_s32(intptr));
                float32x4_t _v1 = vcvtq_f32_s32(vld1q_s32(intptr + 4));
                _v0 = vmlaq_f32(_bias0, _v0, _scale0);
                _v1 = vmlaq_f32(_bias1, _v1, _scale1);
                vst1_s8(ptr, float2int8leakyrelu(_v0, _v1, _slope));

                intptr += 8;
                ptr += 8;
#else  // __aarch64__
                // ARMv7 tail: same scheme as above for a single pack8 element
                // (8 int32 -> 8 int8), with the leaky ReLU realized by vmax.s8.
                asm volatile(
                    "pld        [%0, #256]      \n"
                    "vld1.s32   {d8-d11}, [%0 :128]! \n"
                    "vmov       q0, %q6         \n"
                    "vmov       q1, %q7         \n"
                    "vcvt.f32.s32 q4, q4        \n"
                    "vcvt.f32.s32 q5, q5        \n"
                    "vmla.f32   q0, q4, %q4     \n"
                    "vmla.f32   q1, q5, %q5     \n"
                    "vmul.f32   q2, q0, %q8     \n"
                    "vmul.f32   q3, q1, %q8     \n"
                    "vcvtr.s32.f32 s0, s0       \n"
                    "vcvtr.s32.f32 s1, s1       \n"
                    "vcvtr.s32.f32 s2, s2       \n"
                    "vcvtr.s32.f32 s3, s3       \n"
                    "vcvtr.s32.f32 s4, s4       \n"
                    "vcvtr.s32.f32 s5, s5       \n"
                    "vcvtr.s32.f32 s6, s6       \n"
                    "vcvtr.s32.f32 s7, s7       \n"
                    "vcvtr.s32.f32 s8, s8       \n"
                    "vcvtr.s32.f32 s9, s9       \n"
                    "vcvtr.s32.f32 s10, s10     \n"
                    "vcvtr.s32.f32 s11, s11     \n"
                    "vcvtr.s32.f32 s12, s12     \n"
                    "vcvtr.s32.f32 s13, s13     \n"
                    "vcvtr.s32.f32 s14, s14     \n"
                    "vcvtr.s32.f32 s15, s15     \n"
                    "vqmovn.s32 d8, q0          \n"
                    "vqmovn.s32 d9, q1          \n"
                    "vqmovn.s32 d10, q2         \n"
                    "vqmovn.s32 d11, q3         \n"
                    "vqmovn.s16 d8, q4          \n"
                    "vqmovn.s16 d10, q5         \n"
                    "vmax.s8    d8, d8, d10     \n"
                    "vst1.s8    {d8}, [%1 :64]! \n"
                    : "=r"(intptr), // %0
                    "=r"(ptr)       // %1
                    : "0"(intptr),
                    "1"(ptr),
                    "w"(_scale0), // %4
                    "w"(_scale1), // %5
                    "w"(_bias0),  // %6
                    "w"(_bias1),  // %7
                    "w"(_slope)   // %8
                    : "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif // __aarch64__
            }
        }
    }
}
temporal_sum_method.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Suneth Warnakulasuriya (https://github.com/sunethwarna)
//

#if !defined(KRATOS_TEMPORAL_SUM_METHOD_H_INCLUDED)
#define KRATOS_TEMPORAL_SUM_METHOD_H_INCLUDED

// System includes

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"

// Application includes
#include "custom_methods/temporal_method.h"
#include "custom_utilities/method_utilities.h"
#include "custom_utilities/temporal_method_utilities.h"

namespace Kratos
{
///@addtogroup StatisticsApplication
///@{

///@name Kratos Globals
///@{

namespace TemporalMethods
{
/// Temporal "sum" statistic: accumulates value * delta_time into an output
/// variable on every call to CalculateStatistics, i.e. a time integral of the
/// input variable over the items of a container (nodes/elements/conditions,
/// selected by TContainerType).
///
/// Two flavours are provided:
///   - ValueMethod: accumulates the raw (possibly vector/matrix) value.
///   - NormMethod : accumulates a scalar norm of the value (norm chosen by
///                  the rNormType string).
///
/// TDataRetrievalFunctor / TDataStorageFunctor select how data is read from /
/// written to a container item (e.g. historical vs non-historical variables).
template <class TContainerType, class TContainerItemType, template <class T> class TDataRetrievalFunctor, template <class T> class TDataStorageFunctor>
class TemporalSumMethod
{
public:
    /// Sum of the raw input value; output variable has the same data type as
    /// the input variable.
    template <class TDataType>
    class ValueMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(ValueMethod);

        /// @param rModelPart      model part whose container is processed
        /// @param rNormType       unused here; kept for a uniform constructor
        ///                        signature with NormMethod
        /// @param rInputVariable  variable read from each container item
        /// @param EchoLevel       verbosity level for KRATOS_INFO output
        /// @param rOutputVariable variable receiving the accumulated sum
        ValueMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<TDataType>& rOutputVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mrInputVariable(rInputVariable),
              mrOutputVariable(rOutputVariable)
        {
        }

        // Accumulates input * delta_time into the output variable of every
        // item in the container (OpenMP-parallel over items).
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            const double delta_time = this->GetDeltaTime();

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                TDataType& r_output_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVariable);

                // Guards against dynamically-sized types (e.g. Vector/Matrix)
                // whose input and output sizes differ - TODO confirm semantics.
                MethodUtilities::DataTypeSizeChecker(r_input_value, r_output_value);

                TemporalSumMethod::CalculateSum<TDataType>(
                    r_output_value, r_input_value, delta_time);
            }

            KRATOS_INFO_IF("TemporalValueSumMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal value sum for " << mrInputVariable.Name()
                << " input variable with " << mrOutputVariable.Name()
                << " output variable for " << this->GetModelPart().Name() << ".\n";
        }

        // Sizes/initializes the output variable on every item from the input
        // variable (so dynamically-sized outputs match the input shape).
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataRetrievalFunctor,
                                                             TDataStorageFunctor, TDataType>;
            initializer_method(r_container, mrOutputVariable, mrInputVariable);

            KRATOS_INFO_IF("TemporalValueSumMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal value sum method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " output variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const Variable<TDataType>& mrInputVariable;
        const Variable<TDataType>& mrOutputVariable;
    };

    /// Sum of a scalar norm of the input value; output variable is always a
    /// double regardless of the input data type.
    template <class TDataType>
    class NormMethod : public TemporalMethod
    {
    public:
        KRATOS_CLASS_POINTER_DEFINITION(NormMethod);

        /// @param rNormType norm identifier resolved via
        ///                  MethodUtilities::GetNormMethod (e.g. magnitude)
        NormMethod(
            ModelPart& rModelPart,
            const std::string& rNormType,
            const Variable<TDataType>& rInputVariable,
            const int EchoLevel,
            const Variable<double>& rOutputVariable)
            : TemporalMethod(rModelPart, EchoLevel),
              mNormType(rNormType),
              mrInputVariable(rInputVariable),
              mrOutputVariable(rOutputVariable)
        {
        }

        // Accumulates norm(input) * delta_time into the (scalar) output
        // variable of every item in the container.
        void CalculateStatistics() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());
            const auto& norm_method =
                MethodUtilities::GetNormMethod(mrInputVariable, mNormType);

            const double delta_time = this->GetDeltaTime();

            const int number_of_items = r_container.size();
#pragma omp parallel for
            for (int i = 0; i < number_of_items; ++i)
            {
                TContainerItemType& r_item = *(r_container.begin() + i);
                const TDataType& r_input_value =
                    TDataRetrievalFunctor<TContainerItemType>()(r_item, mrInputVariable);
                const double input_norm_value = norm_method(r_input_value);
                double& r_output_value =
                    TDataStorageFunctor<TContainerItemType>()(r_item, mrOutputVariable);

                TemporalSumMethod::CalculateSum<double>(
                    r_output_value, input_norm_value, delta_time);
            }

            KRATOS_INFO_IF("TemporalNormSumMethod", this->GetEchoLevel() > 1)
                << "Calculated temporal norm sum for " << mrInputVariable.Name()
                << " input variable with " << mrOutputVariable.Name()
                << " output variable for " << this->GetModelPart().Name() << ".\n";
        }

        // Zero-initializes the scalar output variable on every item.
        void InitializeStatisticsVariables() override
        {
            TContainerType& r_container =
                MethodUtilities::GetDataContainer<TContainerType>(this->GetModelPart());

            auto& initializer_method =
                TemporalMethodUtilities::InitializeVariables<TContainerType, TContainerItemType, TDataStorageFunctor>;
            initializer_method(r_container, mrOutputVariable, 0.0);

            KRATOS_INFO_IF("TemporalNormSumMethod", this->GetEchoLevel() > 0)
                << "Initialized temporal norm sum method for "
                << mrInputVariable.Name() << " input variable with "
                << mrOutputVariable.Name() << " output variable for "
                << this->GetModelPart().Name() << ".\n";
        }

    private:
        const std::string mNormType;
        const Variable<TDataType>& mrInputVariable;
        const Variable<double>& mrOutputVariable;
    };

    /// Factory: builds one ValueMethod (rNormType == "none") or NormMethod
    /// (otherwise) per input/output variable-name pair given in Params.
    ///
    /// @param Params settings with "input_variables" and "output_variables"
    ///               string arrays (validated against the defaults below)
    /// @return list of created temporal method objects
    std::vector<TemporalMethod::Pointer> static CreateTemporalMethodObject(
        ModelPart& rModelPart, const std::string& rNormType, const int EchoLevel, Parameters Params)
    {
        KRATOS_TRY

        Parameters default_parameters = Parameters(R"(
            {
                "input_variables"  : [],
                "output_variables" : []
            })");
        Params.RecursivelyValidateAndAssignDefaults(default_parameters);

        const std::vector<std::string>& input_variable_names_list =
            Params["input_variables"].GetStringArray();
        const std::vector<std::string>& output_variable_names_list =
            Params["output_variables"].GetStringArray();

        std::vector<TemporalMethod::Pointer> method_list;
        if (rNormType == "none") // for non norm types
        {
            // Input/output variables must be pairwise type-compatible.
            MethodUtilities::CheckInputOutputVariables(
                input_variable_names_list, output_variable_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_output_name = output_variable_names_list[i];
                ADD_TEMPORAL_VALUE_METHOD_ONE_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_output_name, method_list, ValueMethod)
            }
        }
        else // for values with norms
        {
            // Norm outputs are scalars, so outputs must all be double variables.
            MethodUtilities::CheckVariableType<double>(output_variable_names_list);

            const int number_of_variables = input_variable_names_list.size();
            for (int i = 0; i < number_of_variables; ++i)
            {
                const std::string& r_variable_input_name = input_variable_names_list[i];
                const std::string& r_variable_output_name = output_variable_names_list[i];
                ADD_TEMPORAL_NORM_METHOD_ONE_OUTPUT_VARIABLE_OBJECT(
                    rModelPart, rNormType, r_variable_input_name, EchoLevel,
                    r_variable_output_name, method_list, NormMethod)
            }
        }

        return method_list;

        KRATOS_CATCH("");
    }

private:
    // Core accumulation shared by both method flavours:
    // rSum += rNewDataPoint * DeltaTime.
    template <class TDataType>
    void static CalculateSum(TDataType& rSum, const TDataType& rNewDataPoint, const double DeltaTime)
    {
        rSum = (rSum + rNewDataPoint * DeltaTime);
    }
};
} // namespace TemporalMethods
} // namespace Kratos

#endif // KRATOS_TEMPORAL_SUM_METHOD_H_INCLUDED
par_coarsen.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 *****************************************************************************/

/* following should be in a header file */

#include "_hypre_parcsr_ls.h"

/*==========================================================================*/
/*==========================================================================*/
/**
  Selects a coarse "grid" based on the graph of a matrix.

  Notes:
  \begin{itemize}
  \item The underlying matrix storage scheme is a hypre_ParCSR matrix.
  \item The routine returns the following:
  \begin{itemize}
  \item S - a ParCSR matrix representing the "strength matrix". This is
  used in the "build interpolation" routine.
  \item CF\_marker - an array indicating both C-pts (value = 1) and
  F-pts (value = -1)
  \end{itemize}
  \item We define the following temporary storage:
  \begin{itemize}
  \item measure\_array - an array containing the "measures" for each
  of the fine-grid points
  \item graph\_array - an array containing the list of points in the
  "current subgraph" being considered in the coarsening process.
  \end{itemize}
  \item The graph of the "strength matrix" for A is a subgraph of the
  graph of A, but requires nonsymmetric storage even if A is
  symmetric. This is because of the directional nature of the
  "strength of dependence" notion (see below). Since we are using
  nonsymmetric storage for A right now, this is not a problem. If we
  ever add the ability to store A symmetrically, then we could store
  the strength graph as floats instead of doubles to save space.
  \item This routine currently "compresses" the strength matrix.
We should consider the possibility of defining this matrix to have the same "nonzero structure" as A. To do this, we could use the same A\_i and A\_j arrays, and would need only define the S\_data array. There are several pros and cons to discuss. \end{itemize} Terminology: \begin{itemize} \item Ruge's terminology: A point is "strongly connected to" $j$, or "strongly depends on" $j$, if $-a_ij >= \theta max_{l != j} \{-a_il\}$. \item Here, we retain some of this terminology, but with a more generalized notion of "strength". We also retain the "natural" graph notation for representing the directed graph of a matrix. That is, the nonzero entry $a_ij$ is represented as: i --> j. In the strength matrix, S, the entry $s_ij$ is also graphically denoted as above, and means both of the following: \begin{itemize} \item $i$ "depends on" $j$ with "strength" $s_ij$ \item $j$ "influences" $i$ with "strength" $s_ij$ \end{itemize} \end{itemize} {\bf Input files:} _hypre_parcsr_ls.h @return Error code. 
@param A [IN] coefficient matrix @param strength_threshold [IN] threshold parameter used to define strength @param S_ptr [OUT] strength matrix @param CF_marker_ptr [IN/OUT] array indicating C/F points @see */ /*--------------------------------------------------------------------------*/ #define C_PT 1 #define F_PT -1 #define SF_PT -3 #define COMMON_C_PT 2 #define Z_PT -2 HYPRE_Int hypre_BoomerAMGCoarsen( hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A, HYPRE_Int CF_init, HYPRE_Int debug_flag, hypre_IntArray **CF_marker_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = NULL; HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstColDiag(S); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)hypre_CSRMatrixNumCols(S_diag); HYPRE_Int num_cols_offd = 0; hypre_CSRMatrix *S_ext; HYPRE_Int *S_ext_i = NULL; HYPRE_BigInt *S_ext_j = NULL; HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data; HYPRE_Real *buf_data; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd; HYPRE_Real *measure_array; HYPRE_Int *graph_array; HYPRE_Int *graph_array_offd; HYPRE_Int graph_size; HYPRE_BigInt big_graph_size; HYPRE_Int graph_offd_size; HYPRE_BigInt global_graph_size; HYPRE_Int i, j, k, kc, jS, kS, ig, elmt; HYPRE_Int index, start, my_id, num_procs, jrow, cnt, nnzrow; HYPRE_Int use_commpkg_A = 0; HYPRE_Int break_var = 1; HYPRE_Real wall_time; HYPRE_Int iter = 0; HYPRE_BigInt big_k; #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /*-------------------------------------------------------------- * Compute a 
ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. * * NOTE: the entries are negative initially, corresponding * to "unaccounted-for" dependence. *----------------------------------------------------------------*/ S_ext = NULL; if (debug_flag == 3) { wall_time = time_getWallclockSeconds(); } hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (!comm_pkg) { use_commpkg_A = 1; comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); num_cols_offd = hypre_CSRMatrixNumCols(S_offd); S_diag_j = hypre_CSRMatrixJ(S_diag); if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } /*---------------------------------------------------------- * Compute the measures * * The measures are currently given by the column sums of S. * Hence, measure_array[i] is the number of influences * of variable i. * * The measures are augmented by a random number * between 0 and 1. 
*----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Real, num_variables + num_cols_offd, HYPRE_MEMORY_HOST); for (i = 0; i < S_offd_i[num_variables]; i++) { measure_array[num_variables + S_offd_j[i]] += 1.0; } if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); for (i = 0; i < S_diag_i[num_variables]; i++) { measure_array[S_diag_j[i]] += 1.0; } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)] += buf_data[index++]; } for (i = num_variables; i < num_variables + num_cols_offd; i++) { measure_array[i] = 0; } /* this augments the measures */ if (CF_init == 2) { hypre_BoomerAMGIndepSetInit(S, measure_array, 1); } else { hypre_BoomerAMGIndepSetInit(S, measure_array, 0); } /*--------------------------------------------------- * Initialize the graph array * graph_array contains interior points in elements 0 ... num_variables-1 * followed by boundary values *---------------------------------------------------*/ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); if (num_cols_offd) { graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } else { graph_array_offd = NULL; } /* initialize measure array and graph array */ for (ig = 0; ig < num_cols_offd; ig++) { graph_array_offd[ig] = ig; } /*--------------------------------------------------- * Initialize the C/F marker array * C/F marker array contains interior points in elements 0 ... 
* num_variables-1 followed by boundary values *---------------------------------------------------*/ graph_offd_size = num_cols_offd; /* Allocate CF_marker if not done before */ if (*CF_marker_ptr == NULL) { *CF_marker_ptr = hypre_IntArrayCreate(num_variables); hypre_IntArrayInitialize(*CF_marker_ptr); } CF_marker = hypre_IntArrayData(*CF_marker_ptr); if (CF_init == 1) { cnt = 0; for (i = 0; i < num_variables; i++) { if ( CF_marker[i] != SF_PT ) { if ( (S_offd_i[i + 1] - S_offd_i[i]) > 0 || (CF_marker[i] == F_PT) ) { CF_marker[i] = 0; } if ( CF_marker[i] == Z_PT) { if ( (S_diag_i[i + 1] - S_diag_i[i]) > 0 || (measure_array[i] >= 1.0) ) { CF_marker[i] = 0; graph_array[cnt++] = i; } else { CF_marker[i] = F_PT; } } else { graph_array[cnt++] = i; } } else { measure_array[i] = 0; } } } else { cnt = 0; for (i = 0; i < num_variables; i++) { if ( CF_marker[i] != SF_PT ) { CF_marker[i] = 0; nnzrow = (S_diag_i[i + 1] - S_diag_i[i]) + (S_offd_i[i + 1] - S_offd_i[i]); if (nnzrow == 0) { CF_marker[i] = SF_PT; measure_array[i] = 0; } else { graph_array[cnt++] = i; } } else { measure_array[i] = 0; } } } graph_size = cnt; if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } else { CF_marker_offd = NULL; } for (i = 0; i < num_cols_offd; i++) { CF_marker_offd[i] = 0; } /*--------------------------------------------------- * Loop until all points are either fine or coarse. 
*---------------------------------------------------*/ if (num_procs > 1) { if (use_commpkg_A) { S_ext = hypre_ParCSRMatrixExtractBExt(S, A, 0); } else { S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 0); } S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); } /* compress S_ext and convert column numbers*/ index = 0; for (i = 0; i < num_cols_offd; i++) { for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++) { big_k = S_ext_j[j]; if (big_k >= col_1 && big_k < col_n) { S_ext_j[index++] = big_k - col_1; } else { kc = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd); if (kc > -1) { S_ext_j[index++] = (HYPRE_BigInt)(-kc - 1); } } } S_ext_i[i] = index; } for (i = num_cols_offd; i > 0; i--) { S_ext_i[i] = S_ext_i[i - 1]; } if (num_procs > 1) { S_ext_i[0] = 0; } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Initialize CLJP phase = %f\n", my_id, wall_time); } while (1) { /*------------------------------------------------ * Exchange boundary data, i.i. 
get measures and S_ext_data *------------------------------------------------*/ if (num_procs > 1) comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)] += buf_data[index++]; } /*------------------------------------------------ * Set F-pts and update subgraph *------------------------------------------------*/ if (iter || (CF_init != 1)) { for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if ( (CF_marker[i] != C_PT) && (measure_array[i] < 1) ) { /* set to be an F-pt */ CF_marker[i] = F_PT; /* make sure all dependencies have been accounted for */ for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { if (S_diag_j[jS] > -1) { CF_marker[i] = 0; } } for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { if (S_offd_j[jS] > -1) { CF_marker[i] = 0; } } } if (CF_marker[i]) { measure_array[i] = 0; /* take point out of the subgraph */ graph_size--; graph_array[ig] = graph_array[graph_size]; graph_array[graph_size] = i; ig--; } } } /*------------------------------------------------ * Exchange boundary data, i.i. 
get measures *------------------------------------------------*/ if (debug_flag == 3) { wall_time = time_getWallclockSeconds(); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); buf_data[index++] = measure_array[jrow]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, &measure_array[num_variables]); hypre_ParCSRCommHandleDestroy(comm_handle); } /*------------------------------------------------ * Debugging: * * Uncomment the sections of code labeled * "debugging" to generate several files that * can be visualized using the `coarsen.m' * matlab routine. *------------------------------------------------*/ #if 0 /* debugging */ /* print out measures */ hypre_sprintf(filename, "coarsen.out.measures.%04d", iter); fp = fopen(filename, "w"); for (i = 0; i < num_variables; i++) { hypre_fprintf(fp, "%f\n", measure_array[i]); } fclose(fp); /* print out strength matrix */ hypre_sprintf(filename, "coarsen.out.strength.%04d", iter); hypre_CSRMatrixPrint(S, filename); /* print out C/F marker */ hypre_sprintf(filename, "coarsen.out.CF.%04d", iter); fp = fopen(filename, "w"); for (i = 0; i < num_variables; i++) { hypre_fprintf(fp, "%d\n", CF_marker[i]); } fclose(fp); iter++; #endif /*------------------------------------------------ * Test for convergence *------------------------------------------------*/ big_graph_size = (HYPRE_BigInt) graph_size; hypre_MPI_Allreduce(&big_graph_size, &global_graph_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); if (global_graph_size == 0) { break; } /*------------------------------------------------ * Pick an independent set of points with * maximal measure. 
*------------------------------------------------*/ if (iter || (CF_init != 1)) { hypre_BoomerAMGIndepSet(S, measure_array, graph_array, graph_size, graph_array_offd, graph_offd_size, CF_marker, CF_marker_offd); if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); if (!int_buf_data[index++] && CF_marker[elmt] > 0) { CF_marker[elmt] = 0; } } } } iter++; /*------------------------------------------------ * Exchange boundary data for CF_marker *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); int_buf_data[index++] = CF_marker[elmt]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (ig = 0; ig < graph_offd_size; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i] < 0) { /* take point out of the subgraph */ graph_offd_size--; graph_array_offd[ig] = graph_array_offd[graph_offd_size]; graph_array_offd[graph_offd_size] = i; ig--; } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d iter %d comm. and subgraph update = %f\n", my_id, iter, wall_time); } /*------------------------------------------------ * Set C_pts and apply heuristics. 
*------------------------------------------------*/ for (i = num_variables; i < num_variables + num_cols_offd; i++) { measure_array[i] = 0; } if (debug_flag == 3) { wall_time = time_getWallclockSeconds(); } for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; /*--------------------------------------------- * Heuristic: C-pts don't interpolate from * neighbors that influence them. *---------------------------------------------*/ if (CF_marker[i] > 0) { /* set to be a C-pt */ CF_marker[i] = C_PT; for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { j = S_diag_j[jS]; if (j > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS] - 1; /* decrement measures of unmarked neighbors */ if (!CF_marker[j]) { measure_array[j]--; } } } for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { j = S_offd_j[jS]; if (j > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS] - 1; /* decrement measures of unmarked neighbors */ if (!CF_marker_offd[j]) { measure_array[j + num_variables]--; } } } } else { /* marked dependencies */ for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { j = S_diag_j[jS]; if (j < 0) { j = -j - 1; } if (CF_marker[j] > 0) { if (S_diag_j[jS] > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS] - 1; } /* IMPORTANT: consider all dependencies */ /* temporarily modify CF_marker */ CF_marker[j] = COMMON_C_PT; } else if (CF_marker[j] == SF_PT) { if (S_diag_j[jS] > -1) { /* "remove" edge from S */ S_diag_j[jS] = -S_diag_j[jS] - 1; } } } for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { j = S_offd_j[jS]; if (j < 0) { j = -j - 1; } if (CF_marker_offd[j] > 0) { if (S_offd_j[jS] > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS] - 1; } /* IMPORTANT: consider all dependencies */ /* temporarily modify CF_marker */ CF_marker_offd[j] = COMMON_C_PT; } else if (CF_marker_offd[j] == SF_PT) { if (S_offd_j[jS] > -1) { /* "remove" edge from S */ S_offd_j[jS] = -S_offd_j[jS] - 1; } } } /* unmarked dependencies */ for (jS = S_diag_i[i]; jS 
< S_diag_i[i + 1]; jS++) { if (S_diag_j[jS] > -1) { j = S_diag_j[jS]; break_var = 1; /* check for common C-pt */ for (kS = S_diag_i[j]; kS < S_diag_i[j + 1]; kS++) { k = S_diag_j[kS]; if (k < 0) { k = -k - 1; } /* IMPORTANT: consider all dependencies */ if (CF_marker[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_diag_j[jS] = -S_diag_j[jS] - 1; measure_array[j]--; break_var = 0; break; } } if (break_var) { for (kS = S_offd_i[j]; kS < S_offd_i[j + 1]; kS++) { k = S_offd_j[kS]; if (k < 0) { k = -k - 1; } /* IMPORTANT: consider all dependencies */ if ( CF_marker_offd[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_diag_j[jS] = -S_diag_j[jS] - 1; measure_array[j]--; break; } } } } } for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { if (S_offd_j[jS] > -1) { j = S_offd_j[jS]; /* check for common C-pt */ for (kS = S_ext_i[j]; kS < S_ext_i[j + 1]; kS++) { k = (HYPRE_Int)S_ext_j[kS]; if (k >= 0) { /* IMPORTANT: consider all dependencies */ if (CF_marker[k] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_offd_j[jS] = -S_offd_j[jS] - 1; measure_array[j + num_variables]--; break; } } else { kc = -k - 1; if (kc > -1 && CF_marker_offd[kc] == COMMON_C_PT) { /* "remove" edge from S and update measure*/ S_offd_j[jS] = -S_offd_j[jS] - 1; measure_array[j + num_variables]--; break; } } } } } } /* reset CF_marker */ for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { j = S_diag_j[jS]; if (j < 0) { j = -j - 1; } if (CF_marker[j] == COMMON_C_PT) { CF_marker[j] = C_PT; } } for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { j = S_offd_j[jS]; if (j < 0) { j = -j - 1; } if (CF_marker_offd[j] == COMMON_C_PT) { CF_marker_offd[j] = C_PT; } } } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d CLJP phase = %f graph_size = %d nc_offd = %d\n", my_id, wall_time, graph_size, num_cols_offd); } } /*--------------------------------------------------- * Clean up and return 
*---------------------------------------------------*/

   /* Reset S_matrix: during coarsening, "removed" edges were encoded by
      storing column j as -j-1; restore the original indices so S is
      returned to the caller unmodified. */
   for (i = 0; i < S_diag_i[num_variables]; i++)
   {
      if (S_diag_j[i] < 0)
      {
         S_diag_j[i] = -S_diag_j[i] - 1;
      }
   }
   for (i = 0; i < S_offd_i[num_variables]; i++)
   {
      if (S_offd_j[i] < 0)
      {
         S_offd_j[i] = -S_offd_j[i] - 1;
      }
   }
   /*for (i=0; i < num_variables; i++)
      if (CF_marker[i] == SF_PT) CF_marker[i] = F_PT;*/

   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   if (num_cols_offd)
   {
      hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(S_ext);
   }

   return hypre_error_flag;
}

/*==========================================================================
 * Ruge's coarsening algorithm
 *==========================================================================*/

/* C/F marker values used by the Ruge-Stueben coarsening below */
#define C_PT 1
#define F_PT -1
#define Z_PT -2
#define SF_PT -3 /* special fine points */
#define SC_PT 3 /* special coarse points */
#define UNDECIDED 0

/**************************************************************
 *
 * Ruge Coarsening routine
 *
 **************************************************************/

/* Parallel Ruge-Stueben coarsening driver.
 *
 * S             - strength matrix (pattern only; S_data unused here)
 * A             - system matrix (used for comm pkg fallback, dense-row cut,
 *                 and external-row extraction)
 * measure_type  - 1/4 augment measures with off-processor influences
 *                 (meas_type); 3/4 additionally keep isolated points as
 *                 special coarse points (agg_2)
 * coarsen_type  - selects second/third-pass variants (2..5, 11); 6 and 10
 *                 are Falgout-style entries remapped to 1/11 with Z_PT
 *                 fine points
 * cut_factor    - if > 0, rows denser than cut_factor * average nnz/row
 *                 are forced to SF_PT
 * debug_flag    - 3 prints per-pass wall-clock timings
 * CF_marker_ptr - in/out C/F marker array; allocated here when NULL
 *
 * Returns hypre_error_flag (0 on success).
 */
HYPRE_Int
hypre_BoomerAMGCoarsenRuge( hypre_ParCSRMatrix *S,
                            hypre_ParCSRMatrix *A,
                            HYPRE_Int           measure_type,
                            HYPRE_Int           coarsen_type,
                            HYPRE_Int           cut_factor,
                            HYPRE_Int           debug_flag,
                            hypre_IntArray    **CF_marker_ptr)
{
   MPI_Comm                comm            = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg        = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *A_diag          = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix        *S_diag          = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix        *A_offd          = hypre_ParCSRMatrixOffd(A);
   hypre_CSRMatrix        *S_offd          = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int              *A_i             = hypre_CSRMatrixI(A_diag);
   HYPRE_Int              *S_i             = hypre_CSRMatrixI(S_diag);
   HYPRE_Int              *S_j             = hypre_CSRMatrixJ(S_diag);
   HYPRE_Int              *A_offd_i        = hypre_CSRMatrixI(A_offd);
   HYPRE_Int              *S_offd_i        = hypre_CSRMatrixI(S_offd);
   HYPRE_Int              *S_offd_j        = NULL;
   HYPRE_Int               num_variables   = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int               num_cols_offd   = hypre_CSRMatrixNumCols(S_offd);
   HYPRE_BigInt           *col_map_offd    = hypre_ParCSRMatrixColMapOffd(S);
   HYPRE_BigInt            num_nonzeros    = hypre_ParCSRMatrixNumNonzeros(A);
   HYPRE_BigInt            global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int               avg_nnzrow;
   hypre_CSRMatrix        *S_ext = NULL;    /* rows of S owned by neighbors */
   HYPRE_Int              *S_ext_i = NULL;
   HYPRE_BigInt           *S_ext_j = NULL;
   hypre_CSRMatrix        *ST;              /* transpose of S (diag part) */
   HYPRE_Int              *ST_i;
   HYPRE_Int              *ST_j;
   HYPRE_Int              *CF_marker;
   HYPRE_Int              *CF_marker_offd = NULL;
   /* tentative C-point bookkeeping for the second/third passes */
   HYPRE_Int               ci_tilde = -1;
   HYPRE_Int               ci_tilde_mark = -1;
   HYPRE_Int               ci_tilde_offd = -1;
   HYPRE_Int               ci_tilde_offd_mark = -1;
   HYPRE_Int              *measure_array;
   HYPRE_Int              *graph_array;
   HYPRE_Int              *int_buf_data = NULL;
   HYPRE_Int              *ci_array = NULL;
   HYPRE_BigInt            big_k;
   HYPRE_Int               i, j, k, jS;
   HYPRE_Int               ji, jj, jk, jm, index;
   HYPRE_Int               set_empty = 1;
   HYPRE_Int               C_i_nonempty = 0;
   HYPRE_Int               cut, nnzrow;
   HYPRE_Int               num_procs, my_id;
   HYPRE_Int               num_sends = 0;
   HYPRE_BigInt            first_col;
   HYPRE_Int               start;
   HYPRE_BigInt            col_0, col_n;    /* open interval bounding local columns */
   hypre_LinkList          LoL_head;        /* lists-of-lists keyed by measure */
   hypre_LinkList          LoL_tail;
   HYPRE_Int              *lists, *where;
   HYPRE_Int               measure, new_meas;
   HYPRE_Int               meas_type = 0;
   HYPRE_Int               agg_2 = 0;
   HYPRE_Int               num_left, elmt;
   HYPRE_Int               nabor, nabor_two;
   HYPRE_Int               use_commpkg_A = 0;
   HYPRE_Int               break_var = 0;
   HYPRE_Int               f_pnt = F_PT;
   HYPRE_Real              wall_time;

   /* negative coarsen_type selects the same algorithm; normalize it */
   if (coarsen_type < 0)
   {
      coarsen_type = -coarsen_type;
   }
   if (measure_type == 1 || measure_type == 4)
   {
      meas_type = 1;
   }
   if (measure_type == 4 || measure_type == 3)
   {
      agg_2 = 1;
   }

   /*-------------------------------------------------------
    * Initialize the C/F marker, LoL_head, LoL_tail arrays
    *-------------------------------------------------------*/

   LoL_head = NULL;
   LoL_tail = NULL;
   lists = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   where = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

#if 0 /* debugging */
   char      filename[256];
   FILE     *fp;
   HYPRE_Int iter = 0;
#endif

   /*--------------------------------------------------------------
    * Compute a CSR strength matrix, S.
    *
    * For now, the "strength" of dependence/influence is defined in
    * the following way: i depends on j if
    *     aij > hypre_max (k != i) aik,    aii < 0
    * or
    *     aij < hypre_min (k != i) aik,    aii >= 0
    * Then S_ij = 1, else S_ij = 0.
    *
    * NOTE: the entries are negative initially, corresponding
    * to "unaccounted-for" dependence.
    *----------------------------------------------------------------*/

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds();
   }

   first_col = hypre_ParCSRMatrixFirstColDiag(S);
   col_0 = first_col - 1;
   col_n = col_0 + (HYPRE_BigInt)num_variables;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* fall back to A's comm pkg (creating it if needed) when S has none */
   if (!comm_pkg)
   {
      use_commpkg_A = 1;
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   if (num_cols_offd)
   {
      S_offd_j = hypre_CSRMatrixJ(S_offd);
   }

   jS = S_i[num_variables];

   ST = hypre_CSRMatrixCreate(num_variables, num_variables, jS);
   hypre_CSRMatrixMemoryLocation(ST) = HYPRE_MEMORY_HOST;
   ST_i = hypre_CTAlloc(HYPRE_Int, num_variables + 1, HYPRE_MEMORY_HOST);
   ST_j = hypre_CTAlloc(HYPRE_Int, jS, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixI(ST) = ST_i;
   hypre_CSRMatrixJ(ST) = ST_j;

   /*----------------------------------------------------------
    * generate transpose of S, ST
    *----------------------------------------------------------*/

   /* count entries per column of S, prefix-sum into row pointers of ST */
   for (i = 0; i <= num_variables; i++)
   {
      ST_i[i] = 0;
   }
   for (i = 0; i < jS; i++)
   {
      ST_i[S_j[i] + 1]++;
   }
   for (i = 0; i < num_variables; i++)
   {
      ST_i[i + 1] += ST_i[i];
   }
   /* fill pass advances ST_i; it is shifted back below */
   for (i = 0; i < num_variables; i++)
   {
      for (j = S_i[i]; j < S_i[i + 1]; j++)
      {
         index = S_j[j];
         ST_j[ST_i[index]] = i;
         ST_i[index]++;
      }
   }
   for (i = num_variables; i > 0; i--)
   {
      ST_i[i] = ST_i[i - 1];
   }
   ST_i[0] = 0;

   /*----------------------------------------------------------
    * Compute the measures
    *
    * The measures are given by the row sums of ST.
    * Hence, measure_array[i] is the number of influences
    * of variable i.
    * correct actual measures through adding influences from
    * neighbor processors
    *----------------------------------------------------------*/

   measure_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_variables; i++)
   {
      measure_array[i] = ST_i[i + 1] - ST_i[i];
   }

   /* special case for Falgout coarsening: run type 1/11 but mark
      remaining fine points as Z_PT instead of F_PT */
   if (coarsen_type == 6)
   {
      f_pnt = Z_PT;
      coarsen_type = 1;
   }

   if (coarsen_type == 10)
   {
      f_pnt = Z_PT;
      coarsen_type = 11;
   }

   if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1)
   {
      /* fetch the external rows of S referenced by this processor */
      if (use_commpkg_A)
      {
         S_ext = hypre_ParCSRMatrixExtractBExt(S, A, 0);
      }
      else
      {
         S_ext = hypre_ParCSRMatrixExtractBExt(S, S, 0);
      }
      S_ext_i = hypre_CSRMatrixI(S_ext);
      S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      /* NOTE: this local num_nonzeros intentionally shadows the outer
         (BigInt) declaration; it counts entries of S_ext only */
      HYPRE_Int num_nonzeros = S_ext_i[num_cols_offd];
      /*first_col = hypre_ParCSRMatrixFirstColDiag(S);
        col_0 = first_col-1;
        col_n = col_0+num_variables; */
      if (meas_type)
      {
         /* add influences coming from neighbor processors */
         for (i = 0; i < num_nonzeros; i++)
         {
            index = (HYPRE_Int)(S_ext_j[i] - first_col);
            if (index > -1 && index < num_variables)
            {
               measure_array[index]++;
            }
         }
      }
   }

   /*---------------------------------------------------
    * Loop until all points are either fine or coarse.
*---------------------------------------------------*/

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds();
   }

   /* first coarsening phase */

   /*************************************************************
    *
    * Initialize the lists
    *
    *************************************************************/

   /* Allocate CF_marker if not done before */
   if (*CF_marker_ptr == NULL)
   {
      *CF_marker_ptr = hypre_IntArrayCreate(num_variables);
      hypre_IntArrayInitialize(*CF_marker_ptr);
   }
   CF_marker = hypre_IntArrayData(*CF_marker_ptr);

   /* classify each still-unassigned (==0) point; num_left counts the
      UNDECIDED points the first pass must still color */
   num_left = 0;
   for (j = 0; j < num_variables; j++)
   {
      if (CF_marker[j] == 0)
      {
         nnzrow = (S_i[j + 1] - S_i[j]) + (S_offd_i[j + 1] - S_offd_i[j]);
         if (nnzrow == 0)
         {
            /* no strong connections at all: special fine point, or a
               special coarse point when agg_2 is requested */
            CF_marker[j] = SF_PT;
            if (agg_2)
            {
               CF_marker[j] = SC_PT;
            }
            measure_array[j] = 0;
         }
         else
         {
            CF_marker[j] = UNDECIDED;
            num_left++;
         }
      }
      else
      {
         measure_array[j] = 0;
      }
   }

   /* Set dense rows as SF_PT */
   if ((cut_factor > 0) && (global_num_rows > 0))
   {
      avg_nnzrow = num_nonzeros / global_num_rows;
      cut = cut_factor * avg_nnzrow;
      for (j = 0; j < num_variables; j++)
      {
         nnzrow = (A_i[j + 1] - A_i[j]) + (A_offd_i[j + 1] - A_offd_i[j]);
         if (nnzrow > cut)
         {
            if (CF_marker[j] == UNDECIDED)
            {
               num_left--;
            }
            CF_marker[j] = SF_PT;
         }
      }
   }

   /* seed the lists-of-lists: points with positive measure are entered
      keyed by measure; zero-measure points become fine immediately and
      bump the measures of their earlier-numbered neighbors */
   for (j = 0; j < num_variables; j++)
   {
      measure = measure_array[j];
      if (CF_marker[j] != SF_PT && CF_marker[j] != SC_PT)
      {
         if (measure > 0)
         {
            hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, j, lists, where);
         }
         else
         {
            if (measure < 0)
            {
               hypre_error_w_msg(HYPRE_ERROR_GENERIC, "negative measure!\n");
            }
            CF_marker[j] = f_pnt;
            for (k = S_i[j]; k < S_i[j + 1]; k++)
            {
               nabor = S_j[k];
               if (CF_marker[nabor] != SF_PT && CF_marker[nabor] != SC_PT)
               {
                  if (nabor < j)
                  {
                     /* nabor already listed: move it up one measure */
                     new_meas = measure_array[nabor];
                     if (new_meas > 0)
                     {
                        hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor, lists, where);
                     }
                     new_meas = ++(measure_array[nabor]);
                     hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor, lists, where);
                  }
                  else
                  {
                     /* nabor not yet listed: just bump its measure */
                     new_meas = ++(measure_array[nabor]);
                  }
               }
            }
            --num_left;
         }
      }
   }

   /****************************************************************
    *
    * Main loop of Ruge-Stueben first coloring pass.
    *
    * WHILE there are still points to classify DO:
    *        1) find first point, i, on list with max_measure
    *           make i a C-point, remove it from the lists
    *        2) For each point, j, in S_i^T,
    *           a) Set j to be an F-point
    *           b) For each point, k, in S_j
    *              move k to the list in LoL with measure one
    *              greater than it occupies (creating new LoL
    *              entry if necessary)
    *        3) For each point, j, in S_i,
    *           move j to the list in LoL with measure one
    *           smaller than it occupies (creating new LoL
    *           entry if necessary)
    *
    ****************************************************************/

   while (num_left > 0)
   {
      /* LoL_head holds the current maximal-measure list */
      index = LoL_head -> head;

      CF_marker[index] = C_PT;
      measure = measure_array[index];
      measure_array[index] = 0;
      --num_left;

      hypre_remove_point(&LoL_head, &LoL_tail, measure, index, lists, where);

      /* points influenced by the new C-point become F-points */
      for (j = ST_i[index]; j < ST_i[index + 1]; j++)
      {
         nabor = ST_j[j];
         if (CF_marker[nabor] == UNDECIDED)
         {
            CF_marker[nabor] = F_PT;
            measure = measure_array[nabor];

            hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);
            --num_left;

            /* dependencies of a new F-point gain one measure */
            for (k = S_i[nabor]; k < S_i[nabor + 1]; k++)
            {
               nabor_two = S_j[k];
               if (CF_marker[nabor_two] == UNDECIDED)
               {
                  measure = measure_array[nabor_two];
                  hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor_two, lists, where);
                  new_meas = ++(measure_array[nabor_two]);
                  hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where);
               }
            }
         }
      }

      /* dependencies of the new C-point lose one measure */
      for (j = S_i[index]; j < S_i[index + 1]; j++)
      {
         nabor = S_j[j];
         if (CF_marker[nabor] == UNDECIDED)
         {
            measure = measure_array[nabor];
            hypre_remove_point(&LoL_head, &LoL_tail, measure, nabor, lists, where);
            measure_array[nabor] = --measure;
            if (measure > 0)
            {
               hypre_enter_on_lists(&LoL_head, &LoL_tail, measure, nabor, lists, where);
            }
            else
            {
               /* measure dropped to zero: becomes an F-point now */
               CF_marker[nabor] = F_PT;
               --num_left;

               for (k = S_i[nabor]; k < S_i[nabor + 1]; k++)
               {
                  nabor_two = S_j[k];
                  if (CF_marker[nabor_two] == UNDECIDED)
                  {
                     new_meas =
                        measure_array[nabor_two];
                     hypre_remove_point(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where);
                     new_meas = ++(measure_array[nabor_two]);
                     hypre_enter_on_lists(&LoL_head, &LoL_tail, new_meas, nabor_two, lists, where);
                  }
               }
            }
         }
      }
   }

   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_CSRMatrixDestroy(ST);

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Coarsen 1st pass = %f\n", my_id, wall_time);
   }

   hypre_TFree(lists, HYPRE_MEMORY_HOST);
   hypre_TFree(where, HYPRE_MEMORY_HOST);
   hypre_TFree(LoL_head, HYPRE_MEMORY_HOST);
   hypre_TFree(LoL_tail, HYPRE_MEMORY_HOST);

   /* promote special coarse points to plain C-points */
   for (i = 0; i < num_variables; i++)
   {
      if (CF_marker[i] == SC_PT)
      {
         CF_marker[i] = C_PT;
      }
   }

   /* type 11: one-pass variant; done after the first pass */
   if (coarsen_type == 11)
   {
      if (meas_type && num_procs > 1)
      {
         hypre_CSRMatrixDestroy(S_ext);
      }
      return 0;
   }

   /* second pass, check fine points for coarse neighbors
      for coarsen_type = 2, the second pass includes
      off-processor boundary points */

   /*---------------------------------------------------
    * Initialize the graph array
    *---------------------------------------------------*/

   graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);

   for (i = 0; i < num_variables; i++)
   {
      graph_array[i] = -1;
   }

   if (debug_flag == 3)
   {
      wall_time = time_getWallclockSeconds();
   }

   if (coarsen_type == 2)
   {
      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
         {
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
         }
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_offd; i++)
      {
         ci_array[i] = -1;
      }

      /* second pass (boundary-aware variant): for every F-point i, verify
         each strong F-neighbor j shares a common C-point with i; if not,
         tentatively promote j (ci_tilde / ci_tilde_offd) and re-scan i
         (i--); on a second failure promote i itself */
      for (i = 0; i < num_variables; i++)
      {
         if (ci_tilde_mark != i)
         {
            ci_tilde = -1;
         }
         if (ci_tilde_offd_mark != i)
         {
            ci_tilde_offd = -1;
         }
         if (CF_marker[i] == -1)
         {
            break_var = 1;
            /* stamp i's interior and off-processor C-neighbors */
            for (ji = S_i[i]; ji < S_i[i + 1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
               {
                  graph_array[j] = i;
               }
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i + 1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] > 0)
               {
                  ci_array[j] = i;
               }
            }
            /* check interior F-F pairs for a common C-point */
            for (ji = S_i[i]; ji < S_i[i + 1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_i[j]; jj < S_i[j + 1]; jj++)
                  {
                     index = S_j[jj];
                     if (graph_array[index] == i)
                     {
                        set_empty = 0;
                        break;
                     }
                  }
                  if (set_empty)
                  {
                     for (jj = S_offd_i[j]; jj < S_offd_i[j + 1]; jj++)
                     {
                        index = S_offd_j[jj];
                        if (ci_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        /* second failure for i: make i coarse and undo
                           the tentative promotion */
                        CF_marker[i] = 1;
                        if (ci_tilde > -1)
                        {
                           CF_marker[ci_tilde] = -1;
                           ci_tilde = -1;
                        }
                        if (ci_tilde_offd > -1)
                        {
                           CF_marker_offd[ci_tilde_offd] = -1;
                           ci_tilde_offd = -1;
                        }
                        C_i_nonempty = 0;
                        break_var = 0;
                        break;
                     }
                     else
                     {
                        /* first failure: tentatively promote j, re-scan i */
                        ci_tilde = j;
                        ci_tilde_mark = i;
                        CF_marker[j] = 1;
                        C_i_nonempty = 1;
                        i--;
                        break_var = 0;
                        break;
                     }
                  }
               }
            }
            if (break_var)
            {
               /* same check for off-processor F-neighbors, via S_ext */
               for (ji = S_offd_i[i]; ji < S_offd_i[i + 1]; ji++)
               {
                  j = S_offd_j[ji];
                  if (CF_marker_offd[j] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_ext_i[j]; jj < S_ext_i[j + 1]; jj++)
                     {
                        big_k = S_ext_j[jj];
                        if (big_k > col_0 && big_k < col_n) /* index interior */
                        {
                           if (graph_array[(HYPRE_Int)(big_k - first_col)] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                        else
                        {
                           jk = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd);
                           if (jk != -1)
                           {
                              if (ci_array[jk] == i)
                              {
                                 set_empty = 0;
                                 break;
                              }
                           }
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde = -1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde_offd = j;
                           ci_tilde_offd_mark = i;
                           CF_marker_offd[j] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
            }
         }
      }
   }
   else
   {
      /* second pass, interior-only variant (all other coarsen types) */
      for (i = 0; i < num_variables; i++)
      {
         if (ci_tilde_mark != i)
         {
            ci_tilde = -1;
         }
         if (CF_marker[i] == -1)
         {
            for (ji = S_i[i]; ji < S_i[i + 1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
               {
                  graph_array[j] = i;
               }
            }
            for (ji = S_i[i]; ji < S_i[i + 1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_i[j]; jj < S_i[j + 1]; jj++)
                  {
                     index = S_j[jj];
                     if (graph_array[index] == i)
                     {
                        set_empty = 0;
                        break;
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        CF_marker[i] = 1;
                        if (ci_tilde > -1)
                        {
                           CF_marker[ci_tilde] = -1;
                           ci_tilde = -1;
                        }
                        C_i_nonempty = 0;
                        break;
                     }
                     else
                     {
                        ci_tilde = j;
                        ci_tilde_mark = i;
                        CF_marker[j] = 1;
                        C_i_nonempty = 1;
                        i--;
                        break;
                     }
                  }
               }
            }
         }
      }
   }

   if (debug_flag == 3 && coarsen_type != 2)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time);
   }

   /* third pass, check boundary fine points for coarse neighbors */

   if (coarsen_type == 3 || coarsen_type == 4)
   {
      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds();
      }

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);

      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_offd; i++)
      {
         ci_array[i] = -1;
      }
   }

   if (coarsen_type > 1 && coarsen_type < 5)
   {
      /* third pass proper: run the common-C-point check on the
         off-processor boundary points themselves, using S_ext rows */
      for (i = 0; i < num_variables; i++)
      {
         graph_array[i] = -1;
      }
      for (i = 0; i < num_cols_offd; i++)
      {
         if (ci_tilde_mark != i)
         {
            ci_tilde = -1;
         }
         if (ci_tilde_offd_mark != i)
         {
            ci_tilde_offd = -1;
         }
         if (CF_marker_offd[i] == -1)
         {
            /* stamp C-neighbors of boundary point i */
            for (ji = S_ext_i[i]; ji < S_ext_i[i + 1]; ji++)
            {
               big_k = S_ext_j[ji];
               if (big_k > col_0 && big_k < col_n)
               {
                  j = (HYPRE_Int)(big_k - first_col);
                  if (CF_marker[j] > 0)
                  {
                     graph_array[j] = i;
                  }
               }
               else
               {
                  jj = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd);
                  if (jj != -1 && CF_marker_offd[jj] > 0)
                  {
                     ci_array[jj] = i;
                  }
               }
            }
            for (ji = S_ext_i[i]; ji < S_ext_i[i + 1]; ji++)
            {
               big_k = S_ext_j[ji];
               if (big_k > col_0 && big_k < col_n)
               {
                  j = (HYPRE_Int)(big_k - first_col);
                  if ( CF_marker[j] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_i[j]; jj < S_i[j + 1]; jj++)
                     {
                        index = S_j[jj];
                        if (graph_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     for (jj = S_offd_i[j]; jj < S_offd_i[j + 1]; jj++)
                     {
                        index = S_offd_j[jj];
                        if (ci_array[index] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker_offd[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde = -1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde = j;
                           ci_tilde_mark = i;
                           CF_marker[j] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
               else
               {
                  jm = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd);
                  if (jm != -1 && CF_marker_offd[jm] == -1)
                  {
                     set_empty = 1;
                     for (jj = S_ext_i[jm]; jj < S_ext_i[jm + 1]; jj++)
                     {
                        big_k = S_ext_j[jj];
                        if (big_k > col_0 && big_k < col_n)
                        {
                           if (graph_array[(HYPRE_Int)(big_k - first_col)] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                        else
                        {
                           jk = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd);
                           if (jk != -1)
                           {
                              if (ci_array[jk] == i)
                              {
                                 set_empty = 0;
                                 break;
                              }
                           }
                        }
                     }
                     if (set_empty)
                     {
                        if (C_i_nonempty)
                        {
                           CF_marker_offd[i] = 1;
                           if (ci_tilde > -1)
                           {
                              CF_marker[ci_tilde] = -1;
                              ci_tilde =
-1;
                           }
                           if (ci_tilde_offd > -1)
                           {
                              CF_marker_offd[ci_tilde_offd] = -1;
                              ci_tilde_offd = -1;
                           }
                           C_i_nonempty = 0;
                           break;
                        }
                        else
                        {
                           ci_tilde_offd = jm;
                           ci_tilde_offd_mark = i;
                           CF_marker_offd[jm] = 1;
                           C_i_nonempty = 1;
                           i--;
                           break;
                        }
                     }
                  }
               }
            }
         }
      }

      /*------------------------------------------------
       * Send boundary data for CF_marker back
       *------------------------------------------------*/

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd,
                                                    int_buf_data);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      /* only CF_marker entries from larger procs are accepted
         if coarsen_type = 4 coarse points are not overwritten */

      index = 0;
      if (coarsen_type != 4)
      {
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            if (hypre_ParCSRCommPkgSendProc(comm_pkg, i) > my_id)
            {
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
                  CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)] = int_buf_data[index++];
            }
            else
            {
               /* skip over data from lower-ranked processors */
               index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - start;
            }
         }
      }
      else
      {
         for (i = 0; i < num_sends; i++)
         {
            start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            if (hypre_ParCSRCommPkgSendProc(comm_pkg, i) > my_id)
            {
               for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
               {
                  elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j);
                  if (CF_marker[elmt] != 1)
                  {
                     CF_marker[elmt] = int_buf_data[index];
                  }
                  index++;
               }
            }
            else
            {
               index += hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1) - start;
            }
         }
      }

      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         if (coarsen_type == 4)
            hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time);
         if (coarsen_type == 3)
            hypre_printf("Proc = %d Coarsen 3rd pass = %f\n", my_id, wall_time);
         if (coarsen_type == 2)
            hypre_printf("Proc = %d Coarsen 2nd pass = %f\n", my_id, wall_time);
      }
   }

   if (coarsen_type == 5)
   {
      /*------------------------------------------------
       * Exchange boundary data for CF_marker
       *------------------------------------------------*/

      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds();
      }

      CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);

      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
            int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
      }

      if (num_procs > 1)
      {
         comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
                                                    CF_marker_offd);
         hypre_ParCSRCommHandleDestroy(comm_handle);
      }

      ci_array = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      for (i = 0; i < num_cols_offd; i++)
      {
         ci_array[i] = -1;
      }
      for (i = 0; i < num_variables; i++)
      {
         graph_array[i] = -1;
      }

      /* type 5: mark boundary F-points lacking a common C-point with an
         off-processor F-neighbor as "special" (-2) instead of promoting */
      for (i = 0; i < num_variables; i++)
      {
         if (CF_marker[i] == -1 && (S_offd_i[i + 1] - S_offd_i[i]) > 0)
         {
            break_var = 1;
            for (ji = S_i[i]; ji < S_i[i + 1]; ji++)
            {
               j = S_j[ji];
               if (CF_marker[j] > 0)
               {
                  graph_array[j] = i;
               }
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i + 1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] > 0)
               {
                  ci_array[j] = i;
               }
            }
            for (ji = S_offd_i[i]; ji < S_offd_i[i + 1]; ji++)
            {
               j = S_offd_j[ji];
               if (CF_marker_offd[j] == -1)
               {
                  set_empty = 1;
                  for (jj = S_ext_i[j]; jj < S_ext_i[j + 1]; jj++)
                  {
                     big_k = S_ext_j[jj];
                     if (big_k > col_0 && big_k < col_n) /* index interior */
                     {
                        if (graph_array[(HYPRE_Int)(big_k - first_col)] == i)
                        {
                           set_empty = 0;
                           break;
                        }
                     }
                     else
                     {
                        jk = hypre_BigBinarySearch(col_map_offd, big_k, num_cols_offd);
                        if (jk != -1)
                        {
                           if (ci_array[jk] == i)
                           {
                              set_empty = 0;
                              break;
                           }
                        }
                     }
                  }
                  if (set_empty)
                  {
                     if (C_i_nonempty)
                     {
                        CF_marker[i] = -2;
                        C_i_nonempty = 0;
                        break;
                     }
                     else
                     {
                        C_i_nonempty = 1;
                        i--;
                        break;
                     }
                  }
               }
            }
         }
      }

      if (debug_flag == 3)
      {
         wall_time = time_getWallclockSeconds() - wall_time;
         hypre_printf("Proc = %d Coarsen special points = %f\n", my_id,
                      wall_time);
      }
   }

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   /*if (coarsen_type != 1)
     { */
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(ci_array, HYPRE_MEMORY_HOST);
   /*} */
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   if ((meas_type || (coarsen_type != 1 && coarsen_type != 11)) && num_procs > 1)
   {
      hypre_CSRMatrixDestroy(S_ext);
   }

   return hypre_error_flag;
}

/* Falgout coarsening: a Ruge-Stueben pass (type 6, i.e. type 1 with the
 * remaining fine points marked Z_PT) followed by a CLJP pass that
 * finishes the classification.  Parameters are forwarded unchanged;
 * returns the accumulated error codes of the two passes. */
HYPRE_Int
hypre_BoomerAMGCoarsenFalgout( hypre_ParCSRMatrix *S,
                               hypre_ParCSRMatrix *A,
                               HYPRE_Int           measure_type,
                               HYPRE_Int           cut_factor,
                               HYPRE_Int           debug_flag,
                               hypre_IntArray    **CF_marker_ptr)
{
   HYPRE_Int ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by CLJP coarsening
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 6, cut_factor,
                                       debug_flag, CF_marker_ptr);

   ierr += hypre_BoomerAMGCoarsen (S, A, 1, debug_flag, CF_marker_ptr);

   return (ierr);
}

/*--------------------------------------------------------------------------*/

/* marker values for the PMIS coarsening below (same numeric values as
   the Ruge section above, plus COMMON_C_PT) */
#define C_PT 1
#define F_PT -1
#define SF_PT -3
#define COMMON_C_PT 2
#define Z_PT -2

/* begin HANS added */
/**************************************************************
 *
 * Modified Independent Set Coarsening routine
 *    (don't worry about strong F-F connections
 *     without a common C point)
 *
 **************************************************************/
HYPRE_Int
hypre_BoomerAMGCoarsenPMISHost( hypre_ParCSRMatrix *S,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int           CF_init,
                                HYPRE_Int           debug_flag,
                                hypre_IntArray    **CF_marker_ptr)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] -= hypre_MPI_Wtime();
#endif

   MPI_Comm                comm      = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg    *comm_pkg  = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix        *S_diag    = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int              *S_diag_i  =
hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j; HYPRE_Int num_variables = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int num_cols_offd = 0; /* hypre_CSRMatrix *S_ext; HYPRE_Int *S_ext_i; HYPRE_Int *S_ext_j; */ HYPRE_Int num_sends = 0; HYPRE_Int *int_buf_data; HYPRE_Real *buf_data; HYPRE_Int *CF_marker; HYPRE_Int *CF_marker_offd; HYPRE_Real *measure_array; HYPRE_Int *graph_array; HYPRE_Int *graph_array_offd; HYPRE_Int graph_size; HYPRE_BigInt big_graph_size; HYPRE_Int graph_offd_size; HYPRE_BigInt global_graph_size; HYPRE_Int i, j, jj, jS, ig; HYPRE_Int index, start, my_id, num_procs, jrow, cnt, elmt; HYPRE_Int nnzrow; HYPRE_Int ierr = 0; HYPRE_Real wall_time; HYPRE_Int iter = 0; HYPRE_Int *prefix_sum_workspace; #if 0 /* debugging */ char filename[256]; FILE *fp; HYPRE_Int iter = 0; #endif /******************************************************************************* BEFORE THE INDEPENDENT SET COARSENING LOOP: measure_array: calculate the measures, and communicate them (this array contains measures for both local and external nodes) CF_marker, CF_marker_offd: initialize CF_marker (separate arrays for local and external; 0=unassigned, negative=F point, positive=C point) ******************************************************************************/ /*-------------------------------------------------------------- * Use the ParCSR strength matrix, S. * * For now, the "strength" of dependence/influence is defined in * the following way: i depends on j if * aij > hypre_max (k != i) aik, aii < 0 * or * aij < hypre_min (k != i) aik, aii >= 0 * Then S_ij = 1, else S_ij = 0. 
* * NOTE: S_data is not used; in stead, only strong columns are retained * in S_j, which can then be used like S_data *----------------------------------------------------------------*/ /*S_ext = NULL; */ if (debug_flag == 3) { wall_time = time_getWallclockSeconds(); } hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); if (!comm_pkg) { comm_pkg = hypre_ParCSRMatrixCommPkg(A); } if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); num_cols_offd = hypre_CSRMatrixNumCols(S_offd); S_diag_j = hypre_CSRMatrixJ(S_diag); if (num_cols_offd) { S_offd_j = hypre_CSRMatrixJ(S_offd); } /*---------------------------------------------------------- * Compute the measures * * The measures are currently given by the column sums of S. * Hence, measure_array[i] is the number of influences * of variable i. * * The measures are augmented by a random number * between 0 and 1. 
*----------------------------------------------------------*/ measure_array = hypre_CTAlloc(HYPRE_Real, num_variables + num_cols_offd, HYPRE_MEMORY_HOST); /* first calculate the local part of the sums for the external nodes */ #ifdef HYPRE_USING_OPENMP HYPRE_Int *measure_array_temp = hypre_CTAlloc(HYPRE_Int, num_variables + num_cols_offd, HYPRE_MEMORY_HOST); #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i = 0; i < S_offd_i[num_variables]; i++) { #pragma omp atomic measure_array_temp[num_variables + S_offd_j[i]]++; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i = 0; i < num_cols_offd; i++) { measure_array[i + num_variables] = measure_array_temp[i + num_variables]; } #else for (i = 0; i < S_offd_i[num_variables]; i++) { measure_array[num_variables + S_offd_j[i]] += 1.0; } #endif // HYPRE_USING_OPENMP /* now send those locally calculated values for the external nodes to the neighboring processors */ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(2, comm_pkg, &measure_array[num_variables], buf_data); } /* calculate the local part for the local nodes */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i = 0; i < S_diag_i[num_variables]; i++) { #pragma omp atomic measure_array_temp[S_diag_j[i]]++; } #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE for (i = 0; i < num_variables; i++) { measure_array[i] = measure_array_temp[i]; } hypre_TFree(measure_array_temp, HYPRE_MEMORY_HOST); #else for (i = 0; i < S_diag_i[num_variables]; i++) { measure_array[S_diag_j[i]] += 1.0; } #endif // HYPRE_USING_OPENMP /* finish the communication */ if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); } /* now add the externally calculated part of the local nodes to the local nodes */ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { 
measure_array[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)] += buf_data[index++]; } } /* set the measures of the external nodes to zero */ for (i = num_variables; i < num_variables + num_cols_offd; i++) { measure_array[i] = 0; } /* this augments the measures with a random number between 0 and 1 */ /* (only for the local part) */ /* this augments the measures */ if (CF_init == 2 || CF_init == 4) { hypre_BoomerAMGIndepSetInit(S, measure_array, 1); } else { hypre_BoomerAMGIndepSetInit(S, measure_array, 0); } /*--------------------------------------------------- * Initialize the graph arrays, and CF_marker arrays *---------------------------------------------------*/ /* first the off-diagonal part of the graph array */ if (num_cols_offd) { graph_array_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } else { graph_array_offd = NULL; } for (ig = 0; ig < num_cols_offd; ig++) { graph_array_offd[ig] = ig; } graph_offd_size = num_cols_offd; /* now the local part of the graph array, and the local CF_marker array */ graph_array = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); /* Allocate CF_marker if not done before */ if (*CF_marker_ptr == NULL) { *CF_marker_ptr = hypre_IntArrayCreate(num_variables); hypre_IntArrayInitialize(*CF_marker_ptr); } CF_marker = hypre_IntArrayData(*CF_marker_ptr); if (CF_init == 1) { cnt = 0; for (i = 0; i < num_variables; i++) { if ( CF_marker[i] != SF_PT ) { if ( S_offd_i[i + 1] - S_offd_i[i] > 0 || CF_marker[i] == -1 ) { CF_marker[i] = 0; } if ( CF_marker[i] == Z_PT) { if ( measure_array[i] >= 1.0 || S_diag_i[i + 1] - S_diag_i[i] > 0 ) { CF_marker[i] = 0; graph_array[cnt++] = i; } else { CF_marker[i] = F_PT; } } else { graph_array[cnt++] = i; } } else { measure_array[i] = 0; } } } else { cnt = 0; for (i = 0; i < num_variables; i++) { CF_marker[i] = 0; nnzrow = (S_diag_i[i + 1] - S_diag_i[i]) + (S_offd_i[i + 1] - S_offd_i[i]); if (nnzrow == 0) { CF_marker[i] = SF_PT; /* an isolated fine grid */ if (CF_init == 3 || 
CF_init == 4) { CF_marker[i] = C_PT; } measure_array[i] = 0; } else { graph_array[cnt++] = i; } } } graph_size = cnt; /* now the off-diagonal part of CF_marker */ if (num_cols_offd) { CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } else { CF_marker_offd = NULL; } for (i = 0; i < num_cols_offd; i++) { CF_marker_offd[i] = 0; } /*------------------------------------------------ * Communicate the local measures, which are complete, to the external nodes *------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { jrow = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); buf_data[index++] = measure_array[jrow]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, &measure_array[num_variables]); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag == 3) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Initialize CLJP phase = %f\n", my_id, wall_time); } /* graph_array2 */ HYPRE_Int *graph_array2 = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST); HYPRE_Int *graph_array_offd2 = NULL; if (num_cols_offd) { graph_array_offd2 = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); } /******************************************************************************* THE INDEPENDENT SET COARSENING LOOP: ******************************************************************************/ /*--------------------------------------------------- * Loop until all points are either fine or coarse. 
*---------------------------------------------------*/ while (1) { big_graph_size = (HYPRE_BigInt) graph_size; /* stop the coarsening if nothing left to be coarsened */ hypre_MPI_Allreduce(&big_graph_size, &global_graph_size, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm); /* if (my_id == 0) { hypre_printf("graph size %b\n", global_graph_size); } */ if (global_graph_size == 0) { break; } /* hypre_printf("\n"); hypre_printf("*** MIS iteration %d\n",iter); hypre_printf("graph_size remaining %d\n",graph_size); */ /*----------------------------------------------------------------------------------------- * Pick an independent set of points with maximal measure * At the end, CF_marker is complete, but still needs to be communicated to CF_marker_offd * for CF_init == 1, as in HMIS, the first IS was fed from prior R-S coarsening *----------------------------------------------------------------------------------------*/ if (!CF_init || iter) { /* hypre_BoomerAMGIndepSet(S, measure_array, graph_array, graph_size, graph_array_offd, graph_offd_size, CF_marker, CF_marker_offd); */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if (measure_array[i] > 1) { CF_marker[i] = 1; } } #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_offd_size; ig++) { i = graph_array_offd[ig]; if (measure_array[i + num_variables] > 1) { CF_marker_offd[i] = 1; } } /*------------------------------------------------------- * Remove nodes from the initial independent set *-------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i, jS, j, jj) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; if (measure_array[i] > 1) { /* for each local neighbor j of i */ for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { j = S_diag_j[jS]; if (measure_array[j] 
> 1) { if (measure_array[i] > measure_array[j]) { CF_marker[j] = 0; } else if (measure_array[j] > measure_array[i]) { CF_marker[i] = 0; } } } /* for each offd neighbor j of i */ for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { jj = S_offd_j[jS]; j = num_variables + jj; if (measure_array[j] > 1) { if (measure_array[i] > measure_array[j]) { CF_marker_offd[jj] = 0; } else if (measure_array[j] > measure_array[i]) { CF_marker[i] = 0; } } } } /* for each node with measure > 1 */ } /* for each node i */ /*------------------------------------------------------------------------------ * Exchange boundary data for CF_marker: send external CF to internal CF *------------------------------------------------------------------------------*/ if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, CF_marker_offd, int_buf_data); hypre_ParCSRCommHandleDestroy(comm_handle); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { elmt = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j); if (!int_buf_data[index] && CF_marker[elmt] > 0) { CF_marker[elmt] = 0; index++; } else { int_buf_data[index++] = CF_marker[elmt]; } } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } } /* if (!CF_init || iter) */ iter++; /*------------------------------------------------ * Set C-pts and F-pts. 
*------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(ig, i, jS, j) HYPRE_SMP_SCHEDULE #endif for (ig = 0; ig < graph_size; ig++) { i = graph_array[ig]; /*--------------------------------------------- * If the measure of i is smaller than 1, then * make i and F point (because it does not influence * any other point) *---------------------------------------------*/ if (measure_array[i] < 1) { CF_marker[i] = F_PT; } /*--------------------------------------------- * First treat the case where point i is in the * independent set: make i a C point, *---------------------------------------------*/ if (CF_marker[i] > 0) { CF_marker[i] = C_PT; } /*--------------------------------------------- * Now treat the case where point i is not in the * independent set: loop over * all the points j that influence equation i; if * j is a C point, then make i an F point. *---------------------------------------------*/ else { /* first the local part */ for (jS = S_diag_i[i]; jS < S_diag_i[i + 1]; jS++) { /* j is the column number, or the local number of the point influencing i */ j = S_diag_j[jS]; if (CF_marker[j] > 0) /* j is a C-point */ { CF_marker[i] = F_PT; } } /* now the external part */ for (jS = S_offd_i[i]; jS < S_offd_i[i + 1]; jS++) { j = S_offd_j[jS]; if (CF_marker_offd[j] > 0) /* j is a C-point */ { CF_marker[i] = F_PT; } } } /* end else */ } /* end first loop over graph */ /* now communicate CF_marker to CF_marker_offd, to make sure that new external F points are known on this processor */ /*------------------------------------------------------------------------------ * Exchange boundary data for CF_marker: send internal points to external points *------------------------------------------------------------------------------*/ index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++) { 
int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)]; } } if (num_procs > 1) { comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } /*------------------------------------------------ * Update subgraph *------------------------------------------------*/ /*HYPRE_Int prefix_sum_workspace[2*(hypre_NumThreads() + 1)];*/ prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, 2 * (hypre_NumThreads() + 1), HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(ig,i) #endif { HYPRE_Int private_graph_size_cnt = 0; HYPRE_Int private_graph_offd_size_cnt = 0; HYPRE_Int ig_begin, ig_end; hypre_GetSimpleThreadPartition(&ig_begin, &ig_end, graph_size); HYPRE_Int ig_offd_begin, ig_offd_end; hypre_GetSimpleThreadPartition(&ig_offd_begin, &ig_offd_end, graph_offd_size); for (ig = ig_begin; ig < ig_end; ig++) { i = graph_array[ig]; if (CF_marker[i] != 0) /* C or F point */ { /* the independent set subroutine needs measure 0 for removed nodes */ measure_array[i] = 0; } else { private_graph_size_cnt++; } } for (ig = ig_offd_begin; ig < ig_offd_end; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i] != 0) /* C of F point */ { /* the independent set subroutine needs measure 0 for removed nodes */ measure_array[i + num_variables] = 0; } else { private_graph_offd_size_cnt++; } } hypre_prefix_sum_pair(&private_graph_size_cnt, &graph_size, &private_graph_offd_size_cnt, &graph_offd_size, prefix_sum_workspace); for (ig = ig_begin; ig < ig_end; ig++) { i = graph_array[ig]; if (CF_marker[i] == 0) { graph_array2[private_graph_size_cnt++] = i; } } for (ig = ig_offd_begin; ig < ig_offd_end; ig++) { i = graph_array_offd[ig]; if (CF_marker_offd[i] == 0) { graph_array_offd2[private_graph_offd_size_cnt++] = i; } } } /* omp parallel */ HYPRE_Int *temp = graph_array; graph_array = graph_array2; graph_array2 = temp; temp = graph_array_offd; graph_array_offd = graph_array_offd2; 
graph_array_offd2 = temp;

      hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
   } /* end while */

   /*
      hypre_printf("*** MIS iteration %d\n",iter);
      hypre_printf("graph_size remaining %d\n",graph_size);

      hypre_printf("num_cols_offd %d\n",num_cols_offd);
      for (i=0;i<num_variables;i++)
      {
         if(CF_marker[i] == 1)
         {
            hypre_printf("node %d CF %d\n",i,CF_marker[i]);
         }
      }
   */

   /*---------------------------------------------------
    * Clean up and return
    *---------------------------------------------------*/

   /* Note: CF_marker itself is NOT freed here - it is returned to the
    * caller through *CF_marker_ptr; only the scratch arrays go away. */
   hypre_TFree(measure_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array2, HYPRE_MEMORY_HOST);
   hypre_TFree(graph_array_offd2, HYPRE_MEMORY_HOST);
   if (num_cols_offd)
   {
      hypre_TFree(graph_array_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);

   /*if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);*/

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PMIS] += hypre_MPI_Wtime();
#endif

   return (ierr);
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenPMIS
 *
 * Public entry point for PMIS coarsening.  Dispatches to the device
 * implementation when the matrix lives in device memory (CUDA/HIP builds),
 * and to the host implementation otherwise.
 *
 * S             - strength matrix
 * A             - system matrix (used for its comm pkg / memory location)
 * CF_init       - initialization mode forwarded to the implementation
 * debug_flag    - debug/timing verbosity forwarded to the implementation
 * CF_marker_ptr - output C/F splitting (allocated by the callee if NULL)
 *
 * Returns the error code produced by the selected implementation.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenPMIS( hypre_ParCSRMatrix *S,
                            hypre_ParCSRMatrix *A,
                            HYPRE_Int           CF_init,
                            HYPRE_Int           debug_flag,
                            hypre_IntArray    **CF_marker_ptr)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("PMIS");
#endif

   HYPRE_Int ierr = 0;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      ierr = hypre_BoomerAMGCoarsenPMISDevice( S, A, CF_init, debug_flag, CF_marker_ptr );
   }
   else
#endif
   {
      ierr = hypre_BoomerAMGCoarsenPMISHost( S, A, CF_init, debug_flag, CF_marker_ptr );
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCoarsenHMIS
 *
 * HMIS coarsening: a first pass of Ruge-Stueben coarsening followed by a
 * PMIS pass that is seeded from the Ruge result (CF_init == 1 tells PMIS
 * to treat the existing C/F splitting as its initial independent set).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCoarsenHMIS( hypre_ParCSRMatrix    *S,
                            hypre_ParCSRMatrix    *A,
                            HYPRE_Int              measure_type,
                            HYPRE_Int              cut_factor,
                            HYPRE_Int              debug_flag,
                            hypre_IntArray       **CF_marker_ptr)
{
   HYPRE_Int ierr = 0;

   /*-------------------------------------------------------
    * Perform Ruge coarsening followed by PMIS coarsening
    * (the 10 selects the Ruge coarsening variant used for
    *  the first pass - TODO confirm against the Ruge docs)
    *-------------------------------------------------------*/

   ierr += hypre_BoomerAMGCoarsenRuge (S, A, measure_type, 10, cut_factor,
                                       debug_flag, CF_marker_ptr);

   /* CF_init == 1: feed the Ruge splitting into PMIS as its first
    * independent set */
   ierr += hypre_BoomerAMGCoarsenPMISHost (S, A, 1, debug_flag, CF_marker_ptr);

   return (ierr);
}
matrix.h
/** * @file matrix.h This code provide a templated matrix implementation * @author TPOC: palisade@njit.edu * * @copyright Copyright (c) 2017, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 *
 */

#ifndef LBCRYPTO_MATH_MATRIX_H
#define LBCRYPTO_MATH_MATRIX_H

#include <iostream>
#include <functional>
#include <cmath>
#include <stdexcept>
#include <omp.h>
//using std::function;

#include "../math/backend.h"
#include "../math/nbtheory.h"
#include "../math/distrgen.h"
#include "../lattice/poly.h"
#include "../lattice/dcrtpoly.h"
#include "../encoding/intplaintextencoding.h"
#include "../encoding/packedintplaintextencoding.h"
#include "../utils/inttypes.h"
#include "../utils/utilities.h"
#include "../utils/memory.h"

using std::invalid_argument;

namespace lbcrypto {

	// Templated dense matrix whose entries are heap-allocated Elements
	// (ring elements, integers, ...).  Storage is row-major:
	// data[row][col] owns one Element via unique_ptr.
	template<class Element>
	class Matrix : public Serializable {
	public:
		// Row-major owning storage: outer vector = rows, inner = columns.
		typedef vector<vector<unique_ptr<Element>>> data_t;
		// Factory that produces a freshly allocated Element (e.g. a zero).
		typedef std::function<unique_ptr<Element>(void)> alloc_func;

		/**
		 * Constructor that initializes matrix values using a zero allocator
		 *
		 * @param &allocZero lambda function for zero initialization.
		 * @param &rows number of rows.
		 * @param &cols number of columns.
		 */
		Matrix(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) {
			data.resize(rows);
			for (auto row = data.begin(); row != data.end(); ++row) {
				for (size_t col = 0; col < cols; ++col) {
					row->push_back(allocZero());
				}
			}
		}

		/**
		 * Constructor that initializes matrix values using a distribution generation allocator
		 *
		 * @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects)
		 * @param &rows number of rows.
		 * @param &cols number of columns.
		 * @param &allocGen lambda function for initialization using a distribution generator.
		 */
		Matrix(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen);

		/**
		 * Constructor of an empty matrix; SetSize must be called on this matrix to use it
		 * Basically this exists to support deserializing
		 *
		 * @param &allocZero lambda function for zero initialization.
		 */
		Matrix(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {}

		// Resize an empty matrix to rows x cols and zero-fill it.
		// Throws std::logic_error if the matrix already has a size.
		void SetSize(size_t rows, size_t cols) {
			if( this->rows != 0 || this->cols != 0 )
				throw std::logic_error("You cannot SetSize on a non-empty matrix");

			this->rows = rows;
			this->cols = cols;

			data.resize(rows);
			for (auto row = data.begin(); row != data.end(); ++row) {
				for (size_t col = 0; col < cols; ++col) {
					row->push_back(allocZero());
				}
			}
		}

		/**
		 * Copy constructor
		 *
		 * @param &other the matrix object to be copied
		 */
		Matrix(const Matrix<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) {
			deepCopyData(other.data);
		}

		/**
		 * Assignment operator
		 *
		 * @param &other the matrix object whose values are to be copied
		 * @return the resulting matrix
		 */
		Matrix<Element>& operator=(const Matrix<Element>& other);

		/**
		 * In-place change of the current matrix to a matrix of all ones
		 *
		 * @return the resulting matrix
		 */
		Matrix<Element>& Ones();

		/**
		 * Fill matrix using the same element
		 *
		 * @param &val the element the matrix is filled by
		 *
		 * @return the resulting matrix
		 */
		Matrix<Element>& Fill(const Element &val);

		/**
		 * In-place change of the current matrix to Identity matrix
		 *
		 * @return the resulting matrix
		 */
		Matrix<Element>& Identity();

		/**
		 * Sets the first row to be powers of two
		 *
		 * @return the resulting matrix
		 */
		Matrix<Element> GadgetVector() const;

		/**
		 * Computes the infinity norm
		 *
		 * @return the norm in double format
		 */
		double Norm() const;

		/**
		 * Matrix multiplication
		 *
		 * @param &other the multiplier matrix
		 * @return the result of multiplication
		 */
		Matrix<Element> Mult(Matrix<Element> const& other) const;

		/**
		 * Operator for matrix multiplication
		 *
		 * @param &other the multiplier matrix
		 * @return the result of multiplication
		 */
		Matrix<Element> operator*(Matrix<Element> const& other) const {
			return Mult(other);
		}

		/**
		 * Multiplication of matrix by a scalar
		 *
		 * @param &other the multiplier element
		 * @return the result of multiplication
		 */
		Matrix<Element> ScalarMult(Element const& other) const {
			Matrix<Element> result(*this);
#if 0
			for (size_t row = 0; row < result.rows; ++row) {
				for (size_t col = 0; col < result.cols; ++col) {
					*result.data[row][col] = *result.data[row][col] * other;
				}
			}
#else
			// Parallelized over columns so each OpenMP thread owns a
			// disjoint set of entries (no write conflicts).
	#pragma omp parallel for
			for (size_t col = 0; col < result.cols; ++col) {
				for (size_t row = 0; row < result.rows; ++row) {
					*result.data[row][col] = *result.data[row][col] * other;
				}
			}
#endif
			return result;
		}

		/**
		 * Operator for scalar multiplication
		 *
		 * @param &other the multiplier element
		 * @return the result of multiplication
		 */
		Matrix<Element> operator*(Element const& other) const {
			return ScalarMult(other);
		}

		/**
		 * Equality check
		 *
		 * @param &other the matrix object to compare to
		 * @return the boolean result
		 */
		bool Equal(Matrix<Element> const& other) const {
			if (rows != other.rows || cols != other.cols) {
				return false;
			}

			for (size_t i = 0; i < rows; ++i) {
				for (size_t j = 0; j < cols; ++j) {
					if (*data[i][j] != *other.data[i][j]) {
						return false;
					}
				}
			}
			return true;
		}

		/**
		 * Operator for equality check
		 *
		 * @param &other the matrix object to compare to
		 * @return the boolean result
		 */
		bool operator==(Matrix<Element> const& other) const {
			return Equal(other);
		}

		/**
		 * Operator for non-equality check
		 *
		 * @param &other the matrix object to compare to
		 * @return the boolean result
		 */
		bool operator!=(Matrix<Element> const& other) const {
			return !Equal(other);
		}

		/**
		 * Get property to access the data as a vector of vectors
		 *
		 * @return the data as vector of vectors
		 */
		const data_t& GetData() const {
			return data;
		}

		/**
		 * Get property to access the number of rows in the matrix
		 *
		 * @return the number of rows
		 */
		size_t GetRows() const {
			return rows;
		}

		/**
		 * Get property to access the number of columns in the matrix
		 *
		 * @return the number of columns
		 */
		size_t GetCols() const {
			return cols;
		}

		/**
		 * Get property to access the zero allocator for the matrix
		 *
		 * @return the lambda function corresponding to the element zero allocator
		 */
		alloc_func GetAllocator() const {
			return allocZero;
		}

		/**
		 * Sets the evaluation or coefficient representation for all ring elements that support the SetFormat method
		 *
		 * @param &format the enum value corresponding to coefficient or evaluation representation
		 */
		void SetFormat(Format format);

		/**
		 * Matrix addition
		 *
		 * @param &other the matrix to be added
		 * @return the resulting matrix
		 */
		Matrix<Element> Add(Matrix<Element> const& other) const {
			if (rows != other.rows || cols != other.cols) {
				throw invalid_argument("Addition operands have incompatible dimensions");
			}
			Matrix<Element> result(*this);
#if 0
			for (size_t i = 0; i < rows; ++i) {
				for (size_t j = 0; j < cols; ++j) {
					*result.data[i][j] += *other.data[i][j];
				}
			}
#else
			// Column-parallel loop: each thread updates a disjoint column slice.
	#pragma omp parallel for
			for (size_t j = 0; j < cols; ++j) {
				for (size_t i = 0; i < rows; ++i) {
					*result.data[i][j] += *other.data[i][j];
				}
			}
#endif
			return result;
		}

		/**
		 * Operator for matrix addition
		 *
		 * @param &other the matrix to be added
		 * @return the resulting matrix
		 */
		Matrix<Element> operator+(Matrix<Element> const& other) const {
			return this->Add(other);
		}

		/**
		 * Operator for in-place addition
		 *
		 * @param &other the matrix to be added
		 * @return the resulting matrix (same object)
		 */
		Matrix<Element>& operator+=(Matrix<Element> const& other);

		/**
		 * Matrix subtraction
		 *
		 * @param &other the matrix to be subtracted
		 * @return the resulting matrix
		 */
		Matrix<Element> Sub(Matrix<Element> const& other) const {
			if (rows != other.rows || cols != other.cols) {
				throw invalid_argument("Subtraction operands have incompatible dimensions");
			}
			Matrix<Element> result(allocZero, rows, other.cols);
#if 0
			for (size_t i = 0; i < rows; ++i) {
				for (size_t j = 0; j < cols; ++j) {
					*result.data[i][j] = *data[i][j] - *other.data[i][j];
				}
			}
#else
			// Column-parallel loop: each thread updates a disjoint column slice.
	#pragma omp parallel for
			for (size_t j = 0; j < cols; ++j) {
				for (size_t i = 0; i < rows; ++i) {
					*result.data[i][j] = *data[i][j] - *other.data[i][j];
				}
			}
#endif
			return result;
		}

		/**
		 * Operator for matrix subtraction
		 *
		 * @param &other the matrix to be subtracted
		 * @return the resulting matrix
		 */
		Matrix<Element> operator-(Matrix<Element> const& other) const {
			return this->Sub(other);
		}

		/**
		 * Operator for in-place matrix subtraction
		 *
		 * @param &other the matrix to be subtracted
		 * @return the resulting matrix (same object)
		 */
		Matrix<Element>& operator-=(Matrix<Element> const& other);

		/**
		 * Matrix transposition
		 *
		 * @return the resulting matrix
		 */
		Matrix<Element> Transpose() const;

		// YSP The signature of this method needs to be changed in the future
		/**
		 * Matrix determinant - found using Laplace formula with complexity O(d!), where d is the dimension
		 *
		 * @param *result where the result is stored
		 */
		void Determinant(Element *result) const;
		//Element Determinant() const;

		/**
		 * Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j}
		 *
		 * @return the cofactor matrix for the given matrix
		 */
		Matrix<Element> CofactorMatrix() const;

		/**
		 * Add rows to bottom of the matrix
		 *
		 * @param &other the matrix to be added to the bottom of current matrix
		 * @return the resulting matrix
		 */
		Matrix<Element>& VStack(Matrix<Element> const& other);

		/**
		 * Add columns to the right of the matrix
		 *
		 * @param &other the matrix to be added to the right of current matrix
		 * @return the resulting matrix
		 */
		Matrix<Element>& HStack(Matrix<Element> const& other);

		/**
		 * Matrix indexing operator - writeable instance of the element
		 *
		 * @param &row row index
		 * @param &col column index
		 * @return the element at the index
		 */
		Element& operator()(size_t row, size_t col) {
			return *data[row][col];
		}

		/**
		 * Matrix indexing operator - read-only instance of the element
		 *
		 * @param &row row index
		 * @param &col column index
		 * @return the element at the index
		 */
		Element const& operator()(size_t row, size_t col) const {
			return *data[row][col];
		}

		/**
		 * Matrix row extractor
		 *
		 * NOTE(review): no bounds check on row; an out-of-range index is
		 * undefined behavior - callers must pass row < GetRows().
		 *
		 * @param &row row index
		 * @return the row at the index
		 */
		Matrix<Element> ExtractRow(size_t row) const {
			Matrix<Element> result(this->allocZero,1,this->cols);
			int i = 0;
			for (auto elem = this->GetData()[row].begin(); elem != this->GetData()[row].end(); ++elem) {
				result(0,i) = **elem;
				i++;
			}
			return result;
			//return *this;
		}

		/**
		 * Print values of the matrix to the cout stream
		 *
		 */
		void PrintValues() const;

		// this is a hack for Matrix
		friend std::ostream& operator<<(std::ostream& out, const Matrix<Element>& item) {
			item.PrintValues();
			return out;
		}

		/**
		 * Call switch format for each (ring) element
		 *
		 */
		void SwitchFormat();

		/*
		 * Multiply the matrix by a vector whose elements are all 1's.  This causes the elements of each
		 * row of the matrix to be added and placed into the corresponding position in the output vector.
		 */
		Matrix<Element> MultByUnityVector() const;

		/*
		 * Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
		 * elements in each row together.
		 * Return a vector that is a rows x 1 matrix.
		 */
		Matrix<Element> MultByRandomVector(std::vector<int> ranvec) const;

		/**
		 * Serialize the object into a Serialized
		 * @param serObj is used to store the serialized result. It MUST be a rapidjson Object (SetObject());
		 * @return true if successfully serialized
		 */
		bool Serialize(Serialized* serObj) const;

		/**
		 * Populate the object from the deserialization of the Serialized
		 * @param serObj contains the serialized object
		 * @return true on success
		 */
		bool Deserialize(const Serialized& serObj);

	private:
		data_t data;              // owning row-major element storage
		size_t rows;              // current number of rows
		size_t cols;              // current number of columns
		alloc_func allocZero;     // factory used to create zero-valued elements
		//mutable int NUM_THREADS = 1;

		//deep copy of data - used for copy constructor
		void deepCopyData(data_t const& src);
	};

	/**
	 * Operator for scalar multiplication of matrix
	 *
	 * @param &e element
	 * @param &M matrix
	 * @return the resulting matrix
	 */
	template<class Element>
	Matrix<Element> operator*(Element const& e, Matrix<Element> const& M) {
		return M.ScalarMult(e);
	}

	/**
	 * Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
	 *
	 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
	 * @return the resulting matrix of big binary integers
	 */
	Matrix<BigInteger> Rotate(Matrix<Poly> const& inMat);

	/**
	 * Each element becomes a square matrix with columns of that element's
	 * rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
	 *
	 * @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
	 * @return the resulting matrix of big binary integers
	 */
	Matrix<BigVector> RotateVecResult(Matrix<Poly> const& inMat);

	/**
	 * Stream output operator
	 *
	 * @param &os stream
	 * @param &m matrix to be outputted
	 * @return the chained stream
	 */
	template<class Element>
	std::ostream& operator<<(std::ostream& os, const Matrix<Element>& m);

	/**
	 * Gives the Cholesky decomposition of the input matrix.
	 * The assumption is that covariance matrix does not have large coefficients because it is formed by
	 * discrete gaussians e and s; this implies int32_t can be used
	 * This algorithm can be further improved - see the Darmstadt paper section 4.4
	 * http://eprint.iacr.org/2013/297.pdf
	 *
	 * @param &input the matrix for which the Cholesky decomposition is to be computed
	 * @return the resulting matrix of floating-point numbers
	 */
	Matrix<double> Cholesky(const Matrix<int32_t> &input);

	void Cholesky(const Matrix<int32_t> &input, Matrix<double> &result);

	/**
	 * Convert a matrix of integers from BigInteger to int32_t
	 * Convert from Z_q to [-q/2, q/2]
	 *
	 * @param &input the input matrix
	 * @param &modulus the ring modulus
	 * @return the resulting matrix of int32_t
	 */
	Matrix<int32_t> ConvertToInt32(const Matrix<BigInteger> &input, const BigInteger& modulus);

	/**
	 * Convert a matrix of BigVector to int32_t
	 * Convert from Z_q to [-q/2, q/2]
	 *
	 * @param &input the input matrix
	 * @param &modulus the ring modulus
	 * @return the resulting matrix of int32_t
	 */
	Matrix<int32_t> ConvertToInt32(const Matrix<BigVector> &input, const BigInteger& modulus);

	/**
	 * Split a vector of int32_t into a vector of ring elements with ring dimension n
	 *
	 * @param &other the input matrix
	 * @param &n the ring dimension
	 * @param &params Poly element params
	 * @return the resulting matrix of Poly
	 */
	Matrix<Poly> SplitInt32IntoPolyElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);

	/**
	 * Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
	 *
	 * @param &other the input matrix
	 * @param &n the ring dimension
	 * @param &params Poly element params
	 * @return the resulting matrix of Poly
	 */
	Matrix<Poly> SplitInt32AltIntoPolyElements(Matrix<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);

}

#endif // LBCRYPTO_MATH_MATRIX_H
GB_unop__round_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__round_fp64_fp64
// op(A') function: GB_unop_tran__round_fp64_fp64

// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = round (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = round (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = round (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ROUND || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = round (Ax [p]) for all anz entries, parallelized with OpenMP.
GrB_Info GB_unop_apply__round_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = round (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body comes from the shared transpose template, specialized by the
// macros defined above.
GrB_Info GB_unop_tran__round_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
prefix_sum.c
/*
 * prefix_sum.c -- inclusive/exclusive prefix sum (scan) over int32 arrays,
 * parallelized with OpenMP and vectorized with NEON on aarch64.
 */
#include "alphasparse/util/prefix_sum.h"
#include <stdio.h>
#include <stdlib.h> /* exit() -- was implicitly declared before */
#include <string.h>
#include "alphasparse/util/malloc.h"
#include "alphasparse/util/thread.h"
#ifdef __aarch64__
#include <arm_neon.h>
#endif

/*
 * Adds `increment` to every element of inout[0..len).
 * On aarch64 the main body is vectorized with NEON (16 lanes per iteration);
 * the trailing scalar loop handles the remainder (and the whole array on
 * other architectures).
 */
static void vector_add(int* inout, int increment, size_t len) {
  size_t i = 0;
#ifdef __aarch64__
  int32x4_t v0, v1, v2, v3;
  int32x4_t v_inc = vdupq_n_s32(increment);
  for (; i + 15 < len; i += 16) {
    v0 = vld1q_s32((void*)(inout + i));
    v1 = vld1q_s32((void*)(inout + i + 4));
    v2 = vld1q_s32((void*)(inout + i + 8));
    v3 = vld1q_s32((void*)(inout + i + 12));
    v0 = vaddq_s32(v_inc, v0);
    v1 = vaddq_s32(v_inc, v1);
    v2 = vaddq_s32(v_inc, v2);
    v3 = vaddq_s32(v_inc, v3);
    vst1q_s32(inout + i, v0);
    vst1q_s32(inout + i + 4, v1);
    vst1q_s32(inout + i + 8, v2);
    vst1q_s32(inout + i + 12, v3);
  }
#endif
  for (; i < len; i++) {
    inout[i] += increment;
  }
}

/*
 * Sequential scan of source[0..len) into output[0..len).
 *   INC_SCAN:  output[i] = source[0] + ... + source[i]     (inclusive)
 *   otherwise: output[i] = source[0] + ... + source[i-1]   (exclusive),
 *              with output[0] = 0.
 * Safe to call in place (output == source): the exclusive branch buffers
 * source[i] before output[i] is overwritten.
 *
 * Note: `inline` was dropped from this definition.  A plain external
 * definition avoids C99 inline-linkage pitfalls (an inline definition
 * without `extern` emits no external symbol) when other translation units
 * call this function.
 */
void prefix_sum_single_thread(prefix_sum_type_t scan_type,
                              const int32_t* source, const size_t len,
                              int32_t* output) {
  if (len == 0) { /* was `len <= 0`; len is unsigned, so `< 0` was dead */
    printf("input array is empty\n");
    exit(-1);
  }
  if (scan_type == INC_SCAN) {
    output[0] = source[0];
    for (size_t i = 1; i < len; i++) {
      output[i] = source[i] + output[i - 1];
    }
  } else {
    // in case in-place
    int pre = source[0];
    int cur = 0;
    output[0] = 0;
    for (size_t i = 1; i < len; i++) {
      cur = source[i];
      output[i] = pre + output[i - 1];
      pre = cur;
    }
  }
}

/*
 * Parallel prefix sum.  Splits [0, len) into one contiguous chunk per
 * thread, scans each chunk independently, exclusive-scans the per-chunk
 * totals to get each chunk's offset, then adds the offsets in parallel.
 *
 * Fixes vs. the previous revision: removed the unused `len_per_thread`
 * local and added <stdlib.h> so exit() is declared.
 */
void prefix_sum(prefix_sum_type_t scan_type, const int32_t* source,
                const size_t len, int32_t* output) {
#ifdef _OPENMP
  const int thread_num = alpha_get_thread_num();
#else
  const int thread_num = 1;
#endif
  /* chunk_sum[t] accumulates the total of thread t's chunk */
  int* chunk_sum = (int*)alpha_malloc(sizeof(int32_t) * thread_num);
  memset(chunk_sum, 0, sizeof(int32_t) * thread_num);

#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
  {
    const int tid = alpha_get_thread_id();
    const size_t start = len * tid / thread_num;
    const size_t end = len * (tid + 1) / thread_num;
    if (end > start) {
      if (scan_type == EXL_SCAN) {
        /* an exclusive chunk scan drops source[end-1]; pre-add it so
         * chunk_sum[tid] ends up as the full chunk total */
        chunk_sum[tid] = source[end - 1];
      } else if (scan_type == INC_SCAN) {
        chunk_sum[tid] = 0;
      }
      prefix_sum_single_thread(scan_type, source + start, end - start,
                               output + start);
      chunk_sum[tid] += output[end - 1];
    } else {
      chunk_sum[tid] = 0; /* empty chunk (len < thread_num) */
    }
  }
  /* offsets: exclusive scan of the per-chunk totals (in place) */
  prefix_sum_single_thread(EXL_SCAN, chunk_sum, thread_num, chunk_sum);
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
  {
    const int tid = alpha_get_thread_id();
    const size_t start = len * tid / thread_num;
    const size_t end = len * (tid + 1) / thread_num;
    vector_add(output + start, chunk_sum[tid], end - start);
  }
  alpha_free(chunk_sum);
}
fib.c
#include <stdio.h>
#include <stdlib.h> /* atoi, EXIT_FAILURE -- atoi was implicitly declared before */
#include <stdint.h>
#include <omp.h>

/*
 * Naive doubly-recursive Fibonacci.  Exponential time on purpose: it is the
 * per-iteration workload for the OpenMP demo below.
 * (uint32_t from <stdint.h> replaces the non-standard BSD u_int32_t; same
 * 32-bit unsigned representation, so callers are unaffected.)
 */
uint32_t fib(int n)
{
  if (n < 2)
    return n;
  return fib(n - 1) + fib(n - 2);
}

int main(int argc, char* argv[])
{
  int i = 0;

  /* BUGFIX: argv[1] was dereferenced unconditionally; running without an
   * argument crashed instead of printing usage. */
  if (argc < 2) {
    fprintf(stderr, "Usage: %s <n>\n", argv[0]);
    return EXIT_FAILURE;
  }
  int k = atoi(argv[1]);

  // Use only as many threads as are available
#ifdef _OPENMP
  omp_set_num_threads(omp_get_num_procs());
#endif

  // Privatizes i, and lets each thread compute "round robin"-style, thus
  // preventing only one thread executing the last and heaviest block.
#pragma omp parallel private(i)
  {
#pragma omp for schedule(dynamic, 1)
    for (i = 0; i <= k; i++) {
      printf("Fib(%d): %u\n", i, fib(i));
    }
  }
  return 0;
}
sigmoid_arm_func.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef SIGMOID_OP
#pragma once

#include <cmath>
#include "operators/op_param.h"
#ifdef __ARM_NEON
#include <arm_neon.h>
#include "operators/math/math_func_neon.h"
#endif

namespace paddle_mobile {
namespace operators {

using framework::DDim;

// Computes Y = 1 / (1 + exp(-X)) element-wise.  X and Y must hold the same
// number of float elements.
void sigmoid(const Tensor *X, Tensor *Y) {
#ifdef __ARM_NEON
  const float *input = X->data<float>();
  float *output = Y->mutable_data<float>();
  const DDim &dDim = X->dims();
  // Split dims into outer (parallelized) and inner (vectorized) parts.
  int axis_index = 1;
  if (dDim.size() < 4) {
    axis_index = 0;
  }
  DDim outer_ddim =
      paddle_mobile::framework::slice_ddim(dDim, 0, axis_index + 1);
  DDim inner_ddim =
      paddle_mobile::framework::slice_ddim(dDim, axis_index + 1, dDim.size());
  int out_size = paddle_mobile::framework::product(outer_ddim);
  int inner_size = paddle_mobile::framework::product(inner_ddim);
  DLOG << "outsize=" << out_size;
  DLOG << "innersize=" << inner_size;
#pragma omp parallel for
  for (int i = 0; i < out_size; ++i) {
    const float *input_outer_ptr = input + i * inner_size;
    float *output_outer_ptr = output + i * inner_size;
    int nn = inner_size >> 2;
    int remain = inner_size - (nn << 2);
    float32x4_t _one = vdupq_n_f32(1.f);
    for (; nn > 0; nn--) {
      // sigmoid(x) = 1 / (1 + e^-x): negate, exp, add 1, then take the
      // reciprocal via a vrecpeq estimate plus one Newton-Raphson step.
      float32x4_t data = vld1q_f32(input_outer_ptr);
      data = vnegq_f32(data);
      data = exp_ps(data);
      data = vaddq_f32(data, _one);
      float32x4_t out_data = vrecpeq_f32(data);
      out_data = vmulq_f32(vrecpsq_f32(data, out_data), out_data);
      vst1q_f32(output_outer_ptr, out_data);
      input_outer_ptr += 4;
      output_outer_ptr += 4;
    }
    // Scalar tail for the last (inner_size % 4) elements.
    for (; remain > 0; remain--) {
      *output_outer_ptr = 1.f / (1.f + exp(-*input_outer_ptr));
      output_outer_ptr++;
      input_outer_ptr++;
    }
  }
#else
  // BUGFIX: this branch used to be empty, so on non-NEON builds Y was never
  // written.  Portable scalar fallback:
  const float *input = X->data<float>();
  float *output = Y->mutable_data<float>();
  const int size = paddle_mobile::framework::product(X->dims());
  for (int i = 0; i < size; ++i) {
    output[i] = 1.f / (1.f + std::exp(-input[i]));
  }
#endif
}

// Resizes the output to the input's shape and applies sigmoid().
template <typename P>
void SigmoidCompute(const SigmoidParam<CPU> &param) {
  const Tensor *in_x = param.InputX();
  Tensor *out = param.Out();
  auto x_dims = in_x->dims();
  out->Resize(x_dims);
  sigmoid(in_x, out);
}
}  // namespace operators
}  // namespace paddle_mobile

#endif
6806.c
/*
 * Compile using the command:
 * `cc 27Stencil.c -o oa -fopenmp -lm`
 */
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENACC
#include <openacc.h>
#endif

#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)          /* 27-point stencil weight: average of the 26 neighbours */
#define TOLERANCE 1.0e-15

extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */

unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */

/*
 * Function prototypes for common functions.
 */
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);

/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();

/* Prints command-line help for the two recognised options. */
void usage(char *argv[]) {
  printf("Usage: %s \n"
         "\t--reps <repetitions> (default %d)\n"
         "\t--datasize <datasize> (default %d bytes)\n",
         argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}

/*
 * This function parses the parameters from the command line.
 */
void parse_args(int argc, char *argv[]) {
  int arg;
  for (arg = 1; arg < argc; arg++) {
    if (strcmp(argv[arg], "--reps") == 0) {
      /* atoi returns 0 on non-numeric input, which is rejected below */
      reps = atoi(argv[++arg]);
      if (reps == 0) {
        printf("Invalid integer:--reps: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "--datasize") == 0) {
      datasize = atoi(argv[++arg]);
      if (datasize == 0) {
        printf("Invalid integer:--datasize: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "-h") == 0) {
      usage(argv);
      exit(EXIT_SUCCESS);
    } else {
      printf("Invalid parameters: %s\n", argv[arg]);
      usage(argv);
      exit(EXIT_FAILURE);
    }
  }
}

/*
 * Computes the mean (*mtp) and standard deviation (*sdp) of the global
 * times[] array, ignoring zero entries (which mark failed repetitions).
 * NOTE(review): if every repetition failed, good_reps is 0 and both
 * divisions below produce NaN/inf -- confirm whether that case can occur.
 */
void stats(double *mtp, double *sdp) {
  double meantime, totaltime, sumsq, mintime, maxtime, sd;
  int i, good_reps;
  mintime = 1.0e10;
  maxtime = 0.;
  totaltime = 0.;
  good_reps = 0;
  for (i = 0; i < reps; i++) {
    /* Skip entries where times is 0, this indicates an error occurred */
    if (times[i] != 0){
      mintime = (mintime < times[i]) ? mintime : times[i];
      maxtime = (maxtime > times[i]) ? maxtime : times[i];
      totaltime += times[i];
      good_reps++;
    }
  }
  meantime = totaltime / good_reps;
  sumsq = 0;
  for (i = 0; i < reps; i++) {
    if (times[i] != 0){
      sumsq += (times[i] - meantime) * (times[i] - meantime);
    }
  }
  sd = sqrt(sumsq / good_reps);
  *mtp = meantime;
  *sdp = sd;
}

/*
 * This function prints the results of the tests.
 * If you use a compiler which sets a different preprocessor flag
 * you may wish to add it here.
 */
void print_results(char *name, double testtime, double testsd) {
  char compiler[20];
  /* Set default compiler identifier. */
  sprintf(compiler, "COMPILER");
  /* Set compiler identifier based on known preprocessor flags. */
#ifdef __PGI
  sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
  sprintf(compiler, "CAPS");
#endif
  //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
  printf("%f\n", testtime*1e6);
}

/*
 * This function initialises the storage for the test results and sets the defaults.
 */
void init(int argc, char **argv) {
  parse_args(argc, argv);
  if (reps == -1) {
    reps = DEFAULT_REPS;
  }
  if (datasize == (unsigned int)-1) {
    datasize = DEFAULT_DATASIZE;
  }
  /* one slot per repetition; filled by benchmark() */
  times = (double *)malloc((reps) * sizeof(double));
  /*
  #ifdef __PGI
  acc_init(acc_device_nvidia);
  // printf("PGI INIT\n");
  #endif
  #ifdef __HMPP
  int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
  {}
  #endif
  #ifdef _CRAYC
  int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
  {}
  #endif
  */
}

void finalise(void) {
  free(times);
}

/*
 * This function runs the benchmark specified.
 * The test function reports errors through sentinel return values:
 * -10000 = allocation failure, -11000 = CPU/GPU result mismatch; either
 * records a 0 entry in times[], which stats() then skips.
 */
void benchmark(char *name, double (*test)(void))
{
  int i = 0;
  double tmp = 0;
  for (i=0; i<reps; i++) {
    tmp = test();
    if (tmp == -10000){
      printf("Memory allocation failure in %s\n", name);
      times[i] = 0;
    }
    else if (tmp == -11000){
      printf("CPU/GPU mismatch in %s\n", name);
      times[i] = 0;
    }
    else{
      times[i] = tmp;
    }
  }
  stats(&testtime, &testsd);
  //printf("in benchmark\n");
  print_results(name, testtime, testsd);
  //printf("printed result\n");
}

/*
 * 27-point stencil benchmark: runs the stencil on the host, then in the
 * (OpenACC/OpenMP) accelerated region, compares the two, and returns the
 * accelerated wall time (or a sentinel on error).
 */
double stencil() {
  extern unsigned int datasize;
  /* cube edge chosen so two sz^3 double buffers fit in datasize bytes */
  int sz = cbrt((datasize/sizeof(double))/2);
  int i, j, k, iter;
  int n = sz-2;  /* interior size; one halo cell on each face */
  double fac = FAC;
  double t1, t2;
  double md;
  //printf("size = %d\n", sz);
  /* Work buffers, with halos */
  double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);
  if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
    /* Something went wrong in the memory allocation here, fail gracefully */
    return(-10000);
  }

  /* initialize input array a0 */
  /* zero all of array (including halos) */
  //printf("size = %d\n", sz);
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = 0.0;
        //printf("%d\t", (i*sz*sz+j*sz+k));
      }
    }
  }
  //printf("\n");
  //int size_of_a0 = sizeof(a0) /
sizeof(*a0); //printf("size of a0 = %d\n", size_of_a0); /* use random numbers to fill interior */ for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX); } } } /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */ /* save initial input array for later GPU run */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; } } } //printf("Host computation\n"); /* run main computation on host */ for (iter = 0; iter < ITERATIONS; iter++) { for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ /* save result */ /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k]; // printf("%lf\t", a0[i*sz*sz+j*sz+k]); } } } //int size = sizeof(host_result)/sizeof(host_result[0]); //for(i = 0; i < size; i++) { // 
printf("%lf\t", host_result[i]); //} //printf("\n"); /* copy initial array back to a0 */ /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */ for (i = 0; i < sz; i++) { for (j = 0; j < sz; j++) { for (k = 0; k < sz; k++) { a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k]; } } } //printf("Starting acc pragma code\n"); t1 = omp_get_wtime(); #pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n) { for (iter = 0; iter < ITERATIONS; iter++) { #pragma omp parallel for schedule(static, 16) num_threads(8) for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { #pragma omp simd for (k = 1; k < n+1; k++) { a1[i*sz*sz+j*sz+k] = ( a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] + a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] + a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] + a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] + a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] + a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] + a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] + a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] + a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] + a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)] ) * fac; } } } #pragma acc parallel loop for (i = 1; i < n+1; i++) { #pragma acc loop for (j = 1; j < n+1; j++) { #pragma acc loop for (k = 1; k < n+1; k++) { a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k]; } } } } /* end iteration loop */ } /* end data region */ #pragma acc wait t2 = omp_get_wtime(); memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz); md = max_diff(&host_result[0],&device_result[0], sz); /* Free malloc'd memory to prevent leaks */ free(a0); free(a0_init); free(a1); free(host_result); free(device_result); //printf("md: %lf \t tolerance: %lf", md, 
TOLERANCE); if (md < TOLERANCE ){ //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE); return(t2 - t1); } else{ // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md); return(-11000); } } /* Utility Functions */ double max_diff(double *array1,double *array2, int sz) { double tmpdiff, diff; int i,j,k; int n = sz-2; diff=0.0; for (i = 1; i < n+1; i++) { for (j = 1; j < n+1; j++) { for (k = 1; k < n+1; k++) { tmpdiff = fabs(array1[i*sz*sz+j*sz+k] - array2[i*sz*sz+j*sz+k]); //printf("diff: %lf", tmpdiff); if (tmpdiff > diff) diff = tmpdiff; } } } return diff; } /* * This function ensures the device is awake. * It is more portable than acc_init(). */ void wul(){ int data = 8192; double *arr_a = (double *)malloc(sizeof(double) * data); double *arr_b = (double *)malloc(sizeof(double) * data); int i = 0; if (arr_a==NULL||arr_b==NULL) { printf("Unable to allocate memory in wul.\n"); } for (i=0;i<data;i++){ arr_a[i] = (double) (rand()/(1.0+RAND_MAX)); } #pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data]) { #pragma acc parallel loop for (i=0;i<data;i++){ arr_b[i] = arr_a[i] * 2; } } if (arr_a[0] < 0){ printf("Error in WUL\n"); /* * This should never be called as rands should be in the range (0,1]. * This stops clever optimizers. */ } free(arr_a); free(arr_b); } int main(int argc, char **argv) { char testName[32]; //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n"); /* Initialise storage for test results & parse input arguements. */ init(argc, argv); /* Ensure device is awake. */ wul(); sprintf(testName, "27S"); benchmark(testName, &stencil); /* Print results & free results storage */ finalise(); return EXIT_SUCCESS; }
path.c
/********************************************************************[libaroma]*
 * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *______________________________________________________________________________
 *
 * Filename    : path.c
 * Description : path drawing
 *
 * + This is part of libaroma, an embedded ui toolkit.
 * + 06/04/15 - Author(s): Ahmad Amarullah
 *
 */
#ifndef __libaroma_path_c__
#define __libaroma_path_c__
#include <aroma_internal.h>

/*
 * Function    : libaroma_path
 * Return Value: LIBAROMA_PATHP
 * Descriptions: create new path
 *
 * Allocates a path seeded with one point (x,y).  The point array starts
 * with capacity for 32 points (libaroma_path_add grows it in chunks of 32)
 * and the min/max bounding box is initialised to the first point.
 * Returns NULL on allocation failure.
 */
LIBAROMA_PATHP libaroma_path(float x, float y){
  LIBAROMA_PATHP path = (LIBAROMA_PATHP) calloc(sizeof(LIBAROMA_PATH),1);
  if (!path){
    ALOGW("libaroma_path alloc LIBAROMA_PATHP failed");
    return NULL;
  }
  path->p=(LIBAROMA_PATH_POINTP) malloc(sizeof(LIBAROMA_PATH_POINT)*32);
  if (!path->p){
    free(path);
    ALOGW("libaroma_path alloc path->p failed");
    return NULL;
  }
  path->p[0].x=x;
  path->p[0].y=y;
  path->max.x=path->min.x=x;
  path->max.y=path->min.y=y;
  path->n=1;
  return path;
} /* End of libaroma_path */

/*
 * Function    : libaroma_path_free
 * Return Value: byte
 * Descriptions: free path
 *
 * Releases the point array and the path object itself.
 * Returns 0 if path is NULL, 1 otherwise.
 */
byte libaroma_path_free(LIBAROMA_PATHP path){
  if (!path){
    return 0;
  }
  if (path->p){
    free(path->p);
  }
  free(path);
  return 1;
} /* End of libaroma_path_free */

/*
 * Function    : libaroma_path_add
 * Return Value: byte
 * Descriptions: add point into path
 */
byte libaroma_path_add(LIBAROMA_PATHP path, float x, float y){
  if
(!path){ return 0; } if (!path->p){ return 0; } if (path->n%32==0){ LIBAROMA_PATH_POINTP newp = (LIBAROMA_PATH_POINTP) realloc( path->p,sizeof(LIBAROMA_PATH_POINT)*(path->n+32) ); if (!newp){ ALOGW("libaroma_path_add cannot realloc path->p"); return 0; } path->p = newp; } path->p[path->n].x=x; path->p[path->n].y=y; path->max.x=MAX(path->max.x,x); path->max.y=MAX(path->max.y,y); path->min.x=MIN(path->min.x,x); path->min.y=MIN(path->min.y,y); path->n++; return 1; } /* End of libaroma_path_add */ /* * Function : libaroma_path_curve_calc * Return Value: void * Descriptions: calculating bezier curve */ void libaroma_path_curve_calc( float t, float *x, float *y, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3){ float u = 1-t; float tt = t*t; float uu = u*u; float uuu = uu * u; float ttt = tt * t; /* calculating */ *x = uuu * x0; *x += 3 * uu * t * x1; *x += 3 * u * tt * x2; *x += ttt * x3; *y = uuu * y0; *y += 3 * uu * t * y1; *y += 3 * u * tt * y2; *y += ttt * y3; } /* End of libaroma_path_curve_calc */ /* * Function : _libaroma_path_curve_findpoint * Return Value: byte * Descriptions: find curve path points */ byte _libaroma_path_curve_findpoint( LIBAROMA_PATHP path, float t0, float t1, float x0, float y0, float x1, float y1, float x2, float y2, float x3, float y3, float xt0, float yt0, float xt1, float yt1 ){ if (t0==t1){ return 0; } float thalf = t0 + ((t1 - t0) / 2); float xt, yt; libaroma_path_curve_calc(thalf, &xt, &yt,x0,y0,x1,y1,x2,y2,x3,y3); if ((abs(xt-xt0)>=2)||(abs(yt-yt0)>=2)) { _libaroma_path_curve_findpoint( path,t0,thalf,x0,y0,x1,y1,x2,y2,x3,y3,xt0,yt0,xt,yt); } libaroma_path_add(path, xt, yt); if ((abs(xt-xt1)>=2)||(abs(yt-yt1)>=2)) { _libaroma_path_curve_findpoint( path,thalf,t1,x0,y0,x1,y1,x2,y2,x3,y3,xt,yt,xt1,yt1); } libaroma_path_add(path, xt1, yt1); return 1; } /* End of _libaroma_path_curve_findpoint */ /* * Function : libaroma_path_curve * Return Value: byte * Descriptions: add curve point */ byte 
libaroma_path_curve(
  LIBAROMA_PATHP path,
  int resolution,
  float x1, float y1,
  float x2, float y2,
  float x3, float y3
){
  if (!path){
    return 0;
  }
  if (!path->p){
    return 0;
  }
  if (resolution<1){
    /* dynamic hi res curve calculation: adaptive subdivision starting from
     * the path's current last point */
    float x0 = path->p[path->n-1].x;
    float y0 = path->p[path->n-1].y;
    _libaroma_path_curve_findpoint(
      path,0,1,x0,y0,x1,y1,x2,y2,x3,y3,x0,y0,x3,y3);
  }
  else{
    /* fixed resolution: sample the Bezier at `resolution` evenly spaced
     * t values.
     * NOTE(review): px/py are never updated inside the loop, so the
     * duplicate-pixel check only suppresses points that land on the
     * starting pixel -- confirm whether that is intended. */
    int i;
    float x0 = path->p[path->n-1].x;
    float y0 = path->p[path->n-1].y;
    int px = round(x0);
    int py = round(y0);
    for(i=0;i<resolution;i++){
      float x, y;
      float t = i / ((float) (resolution-1));
      libaroma_path_curve_calc(t,&x,&y,x0,y0,x1,y1,x2,y2,x3,y3);
      int rx = round(x);
      int ry = round(y);
      if ((px!=rx)||(py!=ry)){
        libaroma_path_add(path, x, y);
      }
    }
  }
  return 1;
} /* End of libaroma_path_curve */

/*
 * Function    : libaroma_path_draw
 * Return Value: byte
 * Descriptions: draw path
 *
 * Scanline-fills the closed polygon described by `path` onto `dest`
 * (or the framebuffer canvas when dest is NULL).
 * is_mask: 0 = blend `color` with `alpha`, 1 = write into the canvas's
 * alpha plane, 2 = subtract from the alpha plane.
 * aliasing in (0,1]: sub-scanline sampling step for anti-aliased edges.
 */
byte libaroma_path_draw(
  LIBAROMA_CANVASP dest,
  LIBAROMA_PATHP path,
  word color,
  byte alpha,
  byte is_mask,
  float aliasing){
  if (!dest){
    dest=libaroma_fb()->canvas;
  }
  if ((is_mask)&&(dest->alpha==NULL)){
    return 0;
  }
  if (!path){
    return 0;
  }
  if ((!is_mask)&&(alpha<1)){
    return 1;
  }
  /* clamp aliasing into (0,1] */
  if (aliasing<=0){
    aliasing=1;
  }
  if (aliasing>1){
    aliasing=1;
  }
  /* fill */
  if (path->n>1){
    /* clip the path's bounding box to the canvas */
    int miny = MAX(0,floor(path->min.y));
    int maxy = MIN(dest->h-1,ceil(path->max.y));
    int minx = MAX(0,floor(path->min.x));
    int dwidth = MIN(dest->w,ceil(path->max.x))-minx;
    if (dwidth<1){
      return 1;
    }
    float alias_sz = 1/aliasing;   /* sub-samples per scanline */
    byte alphaaa=alpha*aliasing;   /* per-sub-sample alpha contribution */
    if (is_mask==2){
      alphaaa=255*aliasing;
    }
    int py=0;
    /* loop through the rows of the image.
 */
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
    for (py=miny;py<=maxy;py++) {
      bytep line=NULL;
      if (is_mask){
        /* masks write straight into the canvas's alpha plane */
        line = dest->alpha + py * dest->l + minx;
      }
      else{
        /* color fills accumulate coverage in a scratch row first */
        line = calloc(dwidth,1);
      }
      float * nodes = (float *) malloc(sizeof(float) * path->n);
      int pyn;
      /* sub-scanline samples for anti-aliasing (one pass when aliasing==1) */
      for (pyn=0;pyn<alias_sz;pyn++){
        float fy = ((float) py)+(((float) pyn)*aliasing);
        int i, n=0, j=path->n-1;
        /* find nodes: x positions where polygon edges cross scanline fy */
        for (i=0;i<path->n;i++){
          if (
            ((path->p[i].y<fy)&&(path->p[j].y>=fy))||
            ((path->p[j].y<fy)&&(path->p[i].y>=fy))
          ){
            nodes[n++] = (
              (path->p[i].x+(fy-path->p[i].y)/(path->p[j].y-path->p[i].y)*
              (path->p[j].x-path->p[i].x))) - ((float) minx);
          }
          j = i;
        }
        /* there is nodes */
        if (n>1){
          /* sort the crossings left-to-right (gnome sort) */
          i=0;
          while (i<n-1){
            if (nodes[i]>nodes[i+1]){
              float tmp=nodes[i];
              nodes[i]=nodes[i+1];
              nodes[i+1]=tmp;
              if (i>0){
                i--;
              }
            }
            else{
              i++;
            }
          }
          /* process alpha values: fill between successive crossing pairs
           * (even-odd rule) */
          for (i=0;i<n;i+=2){
            if (nodes[i]>=dwidth){
              break;
            }
            if (nodes[i+1]>0){
              /* clip the span to [0, dwidth) */
              if (nodes[i]<0){
                nodes[i]=0;
              }
              if (nodes[i+1]>dwidth){
                nodes[i+1]=dwidth;
              }
            }
            else{
              continue;
            }
            if (nodes[i+1]-nodes[i]<1){
              continue;
            }
            if (aliasing==1){
              /* no anti-aliasing: solid fill of the span */
              int linex=(int) floor(nodes[i]);
              int linew=((int) floor(nodes[i+1]))-linex;
              memset(line+linex,alpha,linew);
            }
            else{
              int px;
              /* left & right aliasing: edge pixels get fractional coverage */
              int linex=floor(nodes[i]);
              int linerx=floor(nodes[i+1]);
              if (is_mask!=2){
                line[linex]=
                  MIN(255,line[linex]+(1.0-fmod(nodes[i],1))*alphaaa);
                line[linerx]=
                  MIN(255,line[linerx]+fmod(nodes[i+1],1)*alphaaa);
              }
              else{
                line[linex]=
                  MAX(0,((int) line[linex])-(1.0-fmod(nodes[i],1))*alphaaa);
                line[linerx]=
                  MAX(0,((int) line[linerx])-fmod(nodes[i+1],1)*alphaaa);
              }
              linex++;
              int linew=linerx-linex;
              if (linew<1){
                continue;
              }
              bytep cline=line+linex;
              int left=linew;
#ifdef __ARM_NEON__
              /* NEON: 8 pixels at a time; `left` is the scalar remainder */
              left=linew%8;
              if (linew>=8){
                uint8x8_t ro = vmov_n_u8(alphaaa);
                if (is_mask!=2){
                  /* saturating add, clamped to `alpha` */
                  uint16x8_t v255 = vdupq_n_u16(alpha);
                  for (px=0;px<linew-left;px+=8) {
                    uint8x8_t op = vld1_u8(cline+px);
                    vst1_u8(cline+px,
                      vmovn_u16(vminq_u16(vaddl_u8(op, ro),v255)));
                  }
                }
                else{
                  /* subtract, clamped at zero */
                  uint8x8_t v0 = vmov_n_u8(0);
                  for
 (px=0;px<linew-left;px+=8) {
                    uint8x8_t op = vld1_u8(cline+px);
                    vst1_u8(cline+px, vmax_u8(vsub_u8(op,ro),v0));
                  }
                }
              }
#endif
              /* scalar tail (and full span when NEON is unavailable) */
              if (is_mask!=2){
                for (px=linew-left;px<linew;px++){
                  cline[px]=MIN(alpha,cline[px]+alphaaa);
                }
              }
              else{
                for (px=linew-left;px<linew;px++){
                  cline[px]=MAX(0,((int) cline[px])-alphaaa);
                }
              }
            }
          }
        }
      }
      free(nodes);
      if (!is_mask){
        /* process: blend the accumulated coverage row into the canvas */
        if (line!=NULL){
          wordp color_line = dest->data + py * dest->l + minx;
          libaroma_alpha_mono(dwidth,color_line,color_line,color,line);
          free(line);
        }
      }
    }
  }
  return 1;
} /* End of libaroma_path_draw */

#endif /* __libaroma_path_c__ */
469.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) { int t10; int t8; int t6; int t4; int t2; for (t2 = 0; t2 <= tmax - 1; t2 += 1) { for (t4 = 0; t4 <= ny - 1; t4 += 1) ey[0][t4] = _fict_[t2]; #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 1; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 0; t8 <= ny - 1; t8 += 64) for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1) ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 1; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1) for (t8 = 1; t8 <= ny - 1; t8 += 64) for (t10 = t8; t10 <= (ny - 1 < t8 + 63 ? ny - 1 : t8 + 63); t10 += 1) ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]); #pragma omp parallel for private(t4,t6,t8,t10) for (t4 = 0; t4 <= nx - 2; t4 += 16) for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1) for (t8 = 0; t8 <= ny - 2; t8 += 64) for (t10 = t8; t10 <= (ny - 2 < t8 + 63 ? ny - 2 : t8 + 63); t10 += 1) hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]); } }
GB_binop__lor_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__lor_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__lor_uint8) // A*D function (colscale): GB (_AxD__lor_uint8) // D*A function (rowscale): GB (_DxB__lor_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__lor_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__lor_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lor_uint8) // C=scalar+B GB (_bind1st__lor_uint8) // C=scalar+B' GB (_bind1st_tran__lor_uint8) // C=A+scalar GB (_bind2nd__lor_uint8) // C=A'+scalar GB (_bind2nd_tran__lor_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = ((aij != 0) || (bij != 0)) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) || (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOR || GxB_NO_UINT8 || GxB_NO_LOR_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lor_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the brace block above always returns.
    // Harmless generator artifact; left as-is since this file is generated.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__lor_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for LOR, which is commutative, so only this
        // branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lor_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lor_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB is true if present)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) || (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lor_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) || (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) || (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lor_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (a no-op here since A and B have the same type)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint8_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) || (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lor_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
roi_align.c
#include <TH/TH.h>
#include <math.h>
#include <omp.h>

// CPU forward/backward kernels for ROI Align (Torch TH binding).
// Each ROI row is (batch_index, x1, y1, x2, y2) in input-image coordinates;
// spatial_scale maps image coordinates to feature-map coordinates.

void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale,
                        const int num_rois, const int height, const int width,
                        const int channels, const int aligned_height,
                        const int aligned_width, const float * bottom_rois,
                        float* top_data);

void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale,
                         const int num_rois, const int height, const int width,
                         const int channels, const int aligned_height,
                         const int aligned_width, const float * bottom_rois,
                         float* top_data);

// Forward entry point: pools `features` (N,C,H,W) over `rois` (R,5) into
// `output` (R,C,aligned_height,aligned_width).
// Returns 1 on success, 0 if the ROI tensor is malformed.
int roi_align_forward(int aligned_height, int aligned_width, float spatial_scale,
                      THFloatTensor * features, THFloatTensor * rois,
                      THFloatTensor * output)
{
    // Grab the raw buffers of the input tensors.
    float * data_flat = THFloatTensor_data(features);
    float * rois_flat = THFloatTensor_data(rois);
    float * output_flat = THFloatTensor_data(output);

    // Number of ROIs; each row must hold 5 values.
    int num_rois = THFloatTensor_size(rois, 0);
    int size_rois = THFloatTensor_size(rois, 1);
    if (size_rois != 5)
    {
        return 0;
    }

    int data_height = THFloatTensor_size(features, 2);
    int data_width = THFloatTensor_size(features, 3);
    int num_channels = THFloatTensor_size(features, 1);

    ROIAlignForwardCpu(data_flat, spatial_scale, num_rois,
                       data_height, data_width, num_channels,
                       aligned_height, aligned_width,
                       rois_flat, output_flat);
    return 1;
}

// Backward entry point: scatters `top_grad` (R,C,aligned_height,aligned_width)
// back into `bottom_grad` (N,C,H,W). Returns 1 on success, 0 if the ROI
// tensor is malformed. NOTE: bottom_grad is accumulated into — the caller is
// responsible for zeroing it first.
int roi_align_backward(int aligned_height, int aligned_width, float spatial_scale,
                       THFloatTensor * top_grad, THFloatTensor * rois,
                       THFloatTensor * bottom_grad)
{
    float * top_grad_flat = THFloatTensor_data(top_grad);
    float * rois_flat = THFloatTensor_data(rois);
    float * bottom_grad_flat = THFloatTensor_data(bottom_grad);

    int num_rois = THFloatTensor_size(rois, 0);
    int size_rois = THFloatTensor_size(rois, 1);
    if (size_rois != 5)
    {
        return 0;
    }

    // batch size (kept for parity with the original code; unused below)
    int batch_size = THFloatTensor_size(bottom_grad, 0);
    (void)batch_size;
    int data_height = THFloatTensor_size(bottom_grad, 2);
    int data_width = THFloatTensor_size(bottom_grad, 3);
    int num_channels = THFloatTensor_size(bottom_grad, 1);

    ROIAlignBackwardCpu(top_grad_flat, spatial_scale, num_rois,
                        data_height, data_width, num_channels,
                        aligned_height, aligned_width,
                        rois_flat, bottom_grad_flat);
    return 1;
}

void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale,
                        const int num_rois, const int height, const int width,
                        const int channels, const int aligned_height,
                        const int aligned_width, const float * bottom_rois,
                        float* top_data)
{
    const int output_size = num_rois * aligned_height * aligned_width * channels;

    int idx;
    // BUGFIX: the pragma must immediately precede the for loop. The original
    // code had a declaration statement (`int idx;`) between the pragma and
    // the loop, which is not a valid OpenMP canonical loop nest and does not
    // compile. (The loop iteration variable is implicitly private.)
    #pragma omp parallel for
    for (idx = 0; idx < output_size; ++idx)
    {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = idx % aligned_width;
        int ph = (idx / aligned_width) % aligned_height;
        int c  = (idx / aligned_width / aligned_height) % channels;
        int n  = idx / aligned_width / aligned_height / channels;

        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROI to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        // Guard the aligned_* == 1 case, which would otherwise divide by zero.
        float bin_size_h = (aligned_height > 1) ? roi_height / (aligned_height - 1.) : 0.f;
        float bin_size_w = (aligned_width > 1) ? roi_width / (aligned_width - 1.) : 0.f;

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);
        int img_start = roi_batch_ind * channels * height * width;

        // bilinear interpolation over the 2x2 neighborhood of (h, w)
        if (h < 0 || h >= height || w < 0 || w >= width)
        {
            top_data[idx] = 0.;
        }
        else
        {
            float h_ratio = h - (float)(hstart);
            float w_ratio = w - (float)(wstart);
            int upleft = img_start + (c * height + hstart) * width + wstart;
            int upright = upleft + 1;
            int downleft = upleft + width;
            int downright = downleft + 1;

            top_data[idx] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
                          + bottom_data[upright] * (1. - h_ratio) * w_ratio
                          + bottom_data[downleft] * h_ratio * (1. - w_ratio)
                          + bottom_data[downright] * h_ratio * w_ratio;
        }
    }
}

void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale,
                         const int num_rois, const int height, const int width,
                         const int channels, const int aligned_height,
                         const int aligned_width, const float * bottom_rois,
                         float* bottom_diff)
{
    const int output_size = num_rois * aligned_height * aligned_width * channels;

    int idx;
    // BUGFIX: pragma must immediately precede the for loop (see forward pass).
    #pragma omp parallel for
    for (idx = 0; idx < output_size; ++idx)
    {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = idx % aligned_width;
        int ph = (idx / aligned_width) % aligned_height;
        int c  = (idx / aligned_width / aligned_height) % channels;
        int n  = idx / aligned_width / aligned_height / channels;

        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROI to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        float bin_size_h = (aligned_height > 1) ? roi_height / (aligned_height - 1.) : 0.f;
        float bin_size_w = (aligned_width > 1) ? roi_width / (aligned_width - 1.) : 0.f;

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);
        int img_start = roi_batch_ind * channels * height * width;

        // BUGFIX: the original accumulated gradients only when the sample
        // point fell OUTSIDE the feature map (inverted bounds test), so all
        // in-bounds gradients were dropped and out-of-bounds indices were
        // written. Mirror the forward pass: contribute only when in-bounds.
        if (h >= 0 && h < height && w >= 0 && w < width)
        {
            float h_ratio = h - (float)(hstart);
            float w_ratio = w - (float)(wstart);
            int upleft = img_start + (c * height + hstart) * width + wstart;
            int upright = upleft + 1;
            int downleft = upleft + width;
            int downright = downleft + 1;

            // BUGFIX: different output elements (and overlapping ROIs) can
            // scatter into the same input cell, so under the parallel loop
            // each += must be atomic to avoid lost updates.
            #pragma omp atomic
            bottom_diff[upleft] += top_diff[idx] * (1. - h_ratio) * (1. - w_ratio);
            #pragma omp atomic
            bottom_diff[upright] += top_diff[idx] * (1. - h_ratio) * w_ratio;
            #pragma omp atomic
            bottom_diff[downleft] += top_diff[idx] * h_ratio * (1. - w_ratio);
            #pragma omp atomic
            bottom_diff[downright] += top_diff[idx] * h_ratio * w_ratio;
        }
    }
}
NlpGPlacer.h
/**
 * @file NlpGPlacer.h
 * @brief The global placement solver with non-linear optimization
 * @author Keren Zhu
 * @date 03/29/2020
 */

#ifndef IDEAPLACE_NLPGPLACER_H_
#define IDEAPLACE_NLPGPLACER_H_

#include <Eigen/Dense>
#ifdef IDEAPLACE_TASKFLOR_FOR_GRAD_OBJ_
#include <taskflow/taskflow.hpp>
#endif // IDEAPLACE_TASKFLOR_FOR_GRAD_OBJ
#include "db/Database.h"
#include "place/different.h"
#include "place/differentSecondOrder.hpp"
#include "place/nlp/nlpOuterOptm.hpp"
#include "place/nlp/nlpInitPlace.hpp"
#include "place/nlp/nlpTasks.hpp"
#include "place/nlp/nlpTypes.hpp"
#include "place/nlp/nlpOptmKernels.hpp"
#include "place/nlp/nlpFirstOrderKernel.hpp"
#include "place/nlp/nlpSecondOrderKernels.hpp"
#include "place/nlp/conjugateGradientWnlib.hpp" // TODO: remove after no need
#include "pinassign/VirtualPinAssigner.h"

PROJECT_NAMESPACE_BEGIN

namespace nlp {

    /* The wrapper of settings */
    // Policy structs bundling the algorithm/type choices that parameterize
    // NlpGPlacerBase and its derived solvers via the nlp_settings template
    // parameter. Alternatives are kept around as commented-out typedefs.
    struct nlp_default_hyperparamters
    {
    };

    struct nlp_default_types
    {
        typedef RealType nlp_coordinate_type;
        typedef RealType nlp_numerical_type;
        typedef Eigen::Matrix<nlp_numerical_type, Eigen::Dynamic, Eigen::Dynamic> EigenMatrix;
        typedef Eigen::Matrix<nlp_numerical_type, Eigen::Dynamic, 1> EigenVector;
        typedef Eigen::Map<EigenVector> EigenMap;
        typedef Eigen::DiagonalMatrix<nlp_numerical_type, Eigen::Dynamic> EigenDiagonalMatrix;
        // Differentiable operator types for each objective term.
        typedef diff::LseHpwlDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_hpwl_type;
        typedef diff::CellPairOverlapPenaltyDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_ovl_type;
        typedef diff::CellOutOfBoundaryPenaltyDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_oob_type;
        typedef diff::AsymmetryDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_asym_type;
        typedef diff::CosineDatapathDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_cos_type;
        typedef diff::PowerVerQuadraticWireLengthDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_power_wl_type;
        typedef diff::CurrentFlowDifferentiable<nlp_numerical_type, nlp_coordinate_type> nlp_crf_type;
    };

    struct nlp_default_zero_order_algorithms
    {
        typedef outer_stop_condition::stop_condition_list<
            outer_stop_condition::stop_after_violate_small,
            outer_stop_condition::stop_after_num_outer_iterations<1000>
            > stop_condition_type;
        typedef init_place::init_random_placement_with_normal_distribution_near_center init_place_type;
        /* multipliers */
        typedef outer_multiplier::init::hard_code_init mult_init_type;
        typedef outer_multiplier::update::subgradient_normalized_by_init<nlp_default_types::nlp_numerical_type> mult_update_type;
        //typedef outer_multiplier::update::direct_subgradient mult_update_type;
        typedef outer_multiplier::mult_const_hpwl_cos_and_penalty_by_type<nlp_default_types::nlp_numerical_type, mult_init_type, mult_update_type> mult_type;
    };

    struct nlp_default_first_order_algorithms
    {
        typedef converge::converge_list<
            converge::converge_grad_norm_by_init<nlp_default_types::nlp_numerical_type>,
            converge::converge_criteria_max_iter<3000>
            > converge_type;
        //typedef optm::first_order::naive_gradient_descent<converge_type> optm_type;
        typedef optm::first_order::adam<converge_type, nlp_default_types::nlp_numerical_type> optm_type;
        //typedef optm::first_order::nesterov<converge_type, nlp_default_types::nlp_numerical_type> optm_type;
        //typedef optm::first_order::conjugate_gradient_wnlib optm_type;
        /* multipliers */
        typedef outer_multiplier::init::init_by_matching_gradient_norm mult_init_type;
        //typedef outer_multiplier::update::subgradient_normalized_by_init<nlp_default_types::nlp_numerical_type> mult_update_type;
        typedef outer_multiplier::update::direct_subgradient mult_update_type;
        typedef outer_multiplier::mult_const_hpwl_cos_and_penalty_by_type<nlp_default_types::nlp_numerical_type, mult_init_type, mult_update_type> mult_type;
        typedef outer_multiplier::update::match_grad_const_multipliers<nlp_default_types::nlp_numerical_type> mult_adjust_type;
        /* alpha */
        typedef alpha::alpha_hpwl_ovl_oob<nlp_default_types::nlp_numerical_type> alpha_type;
        typedef alpha::update::alpha_update_list<
            alpha::update::reciprocal_by_obj<nlp_default_types::nlp_numerical_type, 1>,
            alpha::update::reciprocal_by_obj<nlp_default_types::nlp_numerical_type, 2>,
            alpha::update::reciprocal_by_obj<nlp_default_types::nlp_numerical_type, 3>
            > alpha_update_type;
    };

    // Hessian-approximation traits (Jacobi/diagonal) for each operator type.
    template<typename nlp_types>
    struct nlp_default_second_order_settings
    {
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_hpwl_type> hpwl_hessian_trait;
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_ovl_type> ovl_hessian_trait;
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_oob_type> oob_hessian_trait;
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_asym_type> asym_hessian_trait;
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_cos_type> cos_hessian_trait;
        typedef diff::jacobi_hessian_approx_trait<typename nlp_types::nlp_power_wl_type> power_wl_hessian_trait;
    };

    struct nlp_default_second_order_algorithms
    {
        typedef converge::converge_list<
            converge::converge_grad_norm_by_init<nlp_default_types::nlp_numerical_type>,
            converge::converge_criteria_max_iter<3000>
            > converge_type;
        //typedef optm::second_order::naive_gradient_descent<converge_type> optm_type;
        typedef optm::second_order::adam<converge_type, nlp_default_types::nlp_numerical_type> optm_type;
        //typedef optm::second_order::nesterov<converge_type, nlp_default_types::nlp_numerical_type> optm_type;
        //typedef optm::first_order::conjugate_gradient_wnlib optm_type;
        /* multipliers */
        typedef outer_multiplier::init::init_by_matching_gradient_norm mult_init_type;
        //typedef outer_multiplier::update::subgradient_normalized_by_init<nlp_default_types::nlp_numerical_type> mult_update_type;
        typedef outer_multiplier::update::direct_subgradient mult_update_type;
        typedef outer_multiplier::mult_const_hpwl_cos_and_penalty_by_type<nlp_default_types::nlp_numerical_type, mult_init_type, mult_update_type> mult_type;
        typedef outer_multiplier::update::match_grad_const_multipliers<nlp_default_types::nlp_numerical_type> mult_adjust_type;
        /* alpha */
        typedef alpha::alpha_hpwl_ovl_oob<nlp_default_types::nlp_numerical_type> alpha_type;
        typedef alpha::update::alpha_update_list<
            alpha::update::reciprocal_by_obj<nlp_default_types::nlp_numerical_type, 1>,
            alpha::update::reciprocal_by_obj<nlp_default_types::nlp_numerical_type, 2>
            > alpha_update_type;
    };

    // The default bundle of all policy choices; pass as nlp_settings.
    struct nlp_default_settings
    {
        typedef nlp_default_zero_order_algorithms nlp_zero_order_algorithms_type;
        typedef nlp_default_first_order_algorithms nlp_first_order_algorithms_type;
        typedef nlp_default_hyperparamters nlp_hyperparamters_type;
        typedef nlp_default_types nlp_types_type;
        typedef nlp_default_second_order_settings<nlp_types_type> nlp_second_order_setting_type;
        typedef nlp_default_second_order_algorithms nlp_second_order_algorithms_type;
    };

}// namespace nlp

/// @brief non-linear programming-based analog global placement
template<typename nlp_settings>
class NlpGPlacerBase
{
    public:
        typedef typename nlp_settings::nlp_types_type nlp_types;
        typedef typename nlp_settings::nlp_zero_order_algorithms_type nlp_zero_order_algorithms;
        typedef typename nlp_settings::nlp_hyperparamters_type nlp_hyperparamters;

        typedef typename nlp_types::EigenMatrix EigenMatrix;
        typedef typename nlp_types::EigenVector EigenVector;
        typedef typename nlp_types::EigenMap EigenMap;
        typedef typename nlp_types::nlp_coordinate_type nlp_coordinate_type;
        typedef typename nlp_types::nlp_numerical_type nlp_numerical_type;
        typedef typename nlp_types::nlp_hpwl_type nlp_hpwl_type;
        typedef typename nlp_types::nlp_ovl_type nlp_ovl_type;
        typedef typename nlp_types::nlp_oob_type nlp_oob_type;
        typedef typename nlp_types::nlp_asym_type nlp_asym_type;
        typedef typename nlp_types::nlp_cos_type nlp_cos_type;
        typedef typename nlp_types::nlp_power_wl_type nlp_power_wl_type;
        typedef typename nlp_types::nlp_crf_type nlp_crf_type;

        /* algorithms */
        typedef typename nlp_zero_order_algorithms::stop_condition_type stop_condition_type;
        typedef nlp::outer_stop_condition::stop_condition_trait<stop_condition_type> stop_condition_trait;
        template<typename _T>
        friend struct nlp::outer_stop_condition::stop_condition_trait;
        typedef typename nlp_zero_order_algorithms::init_place_type init_placement_type;
        typedef nlp::init_place::init_place_trait<init_placement_type> init_place_trait;
        friend init_place_trait;
        typedef typename nlp_zero_order_algorithms::mult_init_type mult_init_type;
        typedef nlp::outer_multiplier::init::multiplier_init_trait<mult_init_type> mult_init_trait;
        friend mult_init_trait;
        typedef typename nlp_zero_order_algorithms::mult_update_type mult_update_type;
        typedef nlp::outer_multiplier::update::multiplier_update_trait<mult_update_type> mult_update_trait;
        friend mult_update_trait;
        typedef typename nlp_zero_order_algorithms::mult_type mult_type;
        typedef nlp::outer_multiplier::multiplier_trait<mult_type> mult_trait;
        friend mult_trait;

    public:
        explicit NlpGPlacerBase(Database &db) : _db(db) {}
        IntType solve();

    protected:
        void assignIoPins();
        /* calculating obj */
        // Runs the task graph that evaluates all objective terms into _obj*.
        void calcObj()
        {
            _wrapObjAllTask.run();
        }
        /* Init functions */
        virtual void initProblem();
        void initHyperParams();
        void initBoundaryParams();
        void initVariables();
        void initPlace();
        void initOperators();
        void initOptimizationKernelMembers();
        /* Output functions */
        void writeOut();
        /* Util functions */
        IndexType plIdx(IndexType cellIdx, Orient2DType orient);
        void alignToSym();
        /* construct tasks */
        virtual void constructTasks();
        // Obj-related
        void constructObjTasks();
        void constructObjectiveCalculationTasks();
        void constructSumObjTasks();
#ifdef DEBUG_SINGLE_THREAD_GP
        void constructWrapObjTask();
#endif
        /* Optimization kernel */
        virtual void optimize();
        /* build the computational graph */
        /* Debugging function */
#ifdef DEBUG_GR
#ifdef DEBUG_DRAW
        void drawCurrentLayout(const std::string &name);
#endif
#endif

    protected:
        Database &_db; ///< The placement engine database
        /* NLP problem parameters */
        IndexType _numCells; ///< The number of cells
        RealType _alpha; ///< Used in LSE approximation hyperparameter
        Box<nlp_coordinate_type> _boundary; ///< The boundary constraint for the placement
        nlp_coordinate_type _scale = 0.01; /// The scale ratio between float optimization kernel coordinate and placement database coordinate unit
        nlp_coordinate_type _totalCellArea = 0; ///< The total cell area of the problem
        nlp_coordinate_type _defaultSymAxis = 0.0; ///< The default symmetric axis
        IndexType _numVariables = 0; ///< The number of variables
        /* Optimization internal results */
        nlp_numerical_type _objHpwl = 0.0; ///< The current value for hpwl
        nlp_numerical_type _objOvl = 0.0; ///< The current value for overlapping penalty
        nlp_numerical_type _objOob = 0.0; ///< The current value for out of boundary penalty
        nlp_numerical_type _objAsym = 0.0; ///< The current value for asymmetry penalty
        nlp_numerical_type _objCos = 0.0; ///< The current value for the cosine signal path penalty
        nlp_numerical_type _objPowerWl = 0.0; ///< power wire length
        nlp_numerical_type _objCrf = 0.0; ///< Current flow
        nlp_numerical_type _obj = 0.0; ///< The current value for the total objective penalty
        nlp_numerical_type _objHpwlRaw = 0.0; ///< The current value for hpwl (unweighted)
        nlp_numerical_type _objOvlRaw = 0.0; ///< The current value for overlapping penalty (unweighted)
        nlp_numerical_type _objOobRaw = 0.0; ///< The current value for out of boundary penalty (unweighted)
        nlp_numerical_type _objAsymRaw = 0.0; ///< The current value for asymmetry penalty (unweighted)
        nlp_numerical_type _objCosRaw = 0.0; ///< The current value for the cosine signal path penalty (unweighted)
        nlp_numerical_type _objPowrWlRaw = 0.0; ///< Power wire length (unweighted)
        nlp_numerical_type _objCrfRaw = 0.0; ///< Current flow (unweighted)
        /* NLP optimization kernel members */
        stop_condition_type _stopCondition;
        /* Optimization data */
        // Layout (see plIdx): [0, _numCells) = x, [_numCells, 2*_numCells) = y,
        // index 2*_numCells (and beyond, with MULTI_SYM_GROUP) = symmetry axes.
        EigenVector _pl; ///< The placement solutions
        /* Tasks */
        // Evaluating objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaHpwlTasks; ///< The tasks for evaluating hpwl objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaOvlTasks; ///< The tasks for evaluating overlap objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaOobTasks; ///< The tasks for evaluating out of boundary objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaAsymTasks; ///< The tasks for evaluating asymmetry objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaCosTasks; ///< The tasks for evaluating signal path objectives
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaPowerWlTasks;
        std::vector<nt::Task<nt::EvaObjTask<nlp_numerical_type>>> _evaCrfTasks; ///< The tasks for evaluating current flow objectives
        // Sum the objectives
        nt::Task<nt::FuncTask> _sumObjHpwlTask; ///< The task for summing hpwl objective
        nt::Task<nt::FuncTask> _sumObjOvlTask; ///< The task for summing the overlapping objective
        nt::Task<nt::FuncTask> _sumObjOobTask; ///< The task for summing the out of boundary objective
        nt::Task<nt::FuncTask> _sumObjAsymTask; ///< The task for summing the asymmetry objective
        nt::Task<nt::FuncTask> _sumObjCosTask; ///< The task for summing the cosine signal path objective
        nt::Task<nt::FuncTask> _sumObjPowerWlTask; ///< The task for summing the power wire length objective
        nt::Task<nt::FuncTask> _sumObjCrfTask; ///< The task for summing the current flow objective
        nt::Task<nt::FuncTask> _sumObjAllTask; ///< The task for summing the different objectives together
        // Wrapper tasks for debugging
        nt::Task<nt::FuncTask> _wrapObjHpwlTask; ///< The task for wrap the objective
        nt::Task<nt::FuncTask> _wrapObjOvlTask;
        nt::Task<nt::FuncTask> _wrapObjOobTask;
        nt::Task<nt::FuncTask> _wrapObjAsymTask;
        nt::Task<nt::FuncTask> _wrapObjCosTask;
        nt::Task<nt::FuncTask> _wrapObjPowerWlTask;
        nt::Task<nt::FuncTask> _wrapObjCrfTask; ///< The wrapper for calculating the current flow objective
        nt::Task<nt::FuncTask> _wrapObjAllTask;
        /* Operators */
        std::vector<nlp_hpwl_type> _hpwlOps; ///< The HPWL cost
        std::vector<nlp_ovl_type> _ovlOps; ///< The cell pair overlapping penalty operators
        std::vector<nlp_oob_type> _oobOps; ///< The cell out of boundary penalty operators
        std::vector<nlp_asym_type> _asymOps; ///< The asymmetric penalty operators
        std::vector<nlp_cos_type> _cosOps; ///< The signal flow operators
        std::vector<nlp_power_wl_type> _powerWlOps;
        std::vector<nlp_crf_type> _crfOps; ///< The current flow operators
        /* run time */
        std::unique_ptr<::klib::StopWatch> _calcObjStopWatch;
};

// Maps (cell, orientation) to the index of its variable in _pl:
// x coordinates occupy [0, _numCells), y coordinates [_numCells, 2*_numCells),
// and any other orientation selects the symmetry-axis variable(s).
template<typename nlp_settings>
inline IndexType NlpGPlacerBase<nlp_settings>::plIdx(IndexType cellIdx, Orient2DType orient)
{
    if (orient == Orient2DType::HORIZONTAL)
    {
        return cellIdx;
    }
    else if (orient == Orient2DType::VERTICAL)
    {
        return cellIdx + _numCells;
    }
    else
    {
#ifdef MULTI_SYM_GROUP
        return cellIdx + 2 * _numCells; // here cell index representing the idx of sym grp
#else
        return 2 * _numCells;
#endif
    }
}

/// @brief first-order optimization
template<typename nlp_settings>
class NlpGPlacerFirstOrder : public NlpGPlacerBase<nlp_settings>
{
    public:
        typedef NlpGPlacerBase<nlp_settings> base_type;
        typedef typename base_type::EigenVector EigenVector;
        typedef typename base_type::nlp_hpwl_type nlp_hpwl_type;
        typedef typename base_type::nlp_ovl_type nlp_ovl_type;
        typedef typename base_type::nlp_oob_type nlp_oob_type;
        typedef typename base_type::nlp_asym_type nlp_asym_type;
        typedef typename base_type::nlp_cos_type nlp_cos_type;
        typedef typename base_type::nlp_power_wl_type nlp_power_wl_type;
        typedef typename base_type::nlp_crf_type nlp_crf_type;

        typedef typename nlp_settings::nlp_first_order_algorithms_type nlp_first_order_algorithms;
        typedef typename nlp_first_order_algorithms::converge_type converge_type;
        typedef typename nlp::converge::converge_criteria_trait<converge_type> converge_trait;
        typedef typename nlp_first_order_algorithms::optm_type optm_type;
        typedef
typename nlp::optm::optm_trait<optm_type> optm_trait; friend converge_type; template<typename converge_criteria_type> friend struct nlp::converge::converge_criteria_trait; friend optm_type; friend optm_trait; typedef typename nlp_settings::nlp_first_order_algorithms_type::mult_init_type mult_init_type; typedef nlp::outer_multiplier::init::multiplier_init_trait<mult_init_type> mult_init_trait; friend mult_init_trait; typedef typename nlp_settings::nlp_first_order_algorithms_type::mult_update_type mult_update_type; typedef nlp::outer_multiplier::update::multiplier_update_trait<mult_update_type> mult_update_trait; friend mult_update_trait; typedef typename nlp_settings::nlp_first_order_algorithms_type::mult_type mult_type; typedef nlp::outer_multiplier::multiplier_trait<mult_type> mult_trait; friend mult_trait; typedef typename nlp_settings::nlp_first_order_algorithms_type::mult_adjust_type mult_adjust_type; typedef nlp::outer_multiplier::update::multiplier_update_trait<mult_adjust_type> mult_adjust_trait; friend mult_adjust_trait; /* updating alpha parameters */ typedef typename nlp_settings::nlp_first_order_algorithms_type::alpha_type alpha_type; typedef nlp::alpha::alpha_trait<alpha_type> alpha_trait; template<typename T> friend struct nlp::alpha::alpha_trait; typedef typename nlp_settings::nlp_first_order_algorithms_type::alpha_update_type alpha_update_type; typedef nlp::alpha::update::alpha_update_trait<alpha_update_type> alpha_update_trait; template<typename T> friend struct nlp::alpha::update::alpha_update_trait; NlpGPlacerFirstOrder(Database &db) : NlpGPlacerBase<nlp_settings>(db) {} void writeoutCsv() { std::string ver1f = "ver1.csv"; std::string ver2f = "ver2.csv"; std::ofstream ver1(ver1f.c_str()); std::ofstream ver2(ver2f.c_str()); ver1 << "x y val\n"; ver2 << "x y val\n"; for (RealType x = -8; x < 8; x+=(16.0/300)) { for (RealType y = -8; y < 8; y+=(16.0/300)) { this->_pl(this->plIdx(0, Orient2DType::HORIZONTAL)) = x; this->_pl(this->plIdx(0, 
Orient2DType::VERTICAL)) = y; this->_wrapObjAllTask.run(); auto obj = this->_obj; ver1 << x << " "<< y << " " << obj << "\n"; } } auto getLambda = [&](){ return 1.0; }; for (auto &op : this->_hpwlOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_cosOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_ovlOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_oobOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_asymOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_powerWlOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (auto &op : this->_crfOps) { op._getLambdaFunc = [&](){ return 1.0; }; } for (RealType x = -8; x < 8; x+=(16.0/300)) { for (RealType y = -8; y < 8; y+=(16.0/300)) { this->_pl(this->plIdx(0, Orient2DType::HORIZONTAL)) = x; this->_pl(this->plIdx(0, Orient2DType::VERTICAL)) = y; this->_wrapObjAllTask.run(); auto obj = this->_obj; ver2 << x << " "<< y << " " << obj << "\n"; } } } protected: /* calculating gradient */ void calcGrad() { _wrapCalcGradTask.run(); } /* Init */ virtual void initProblem() override; void initFirstOrderGrad(); /* Construct tasks */ virtual void constructTasks() override; void constructFirstOrderTasks(); void constructCalcPartialsTasks(); void constructUpdatePartialsTasks(); void constructClearGradTasks(); void constructSumGradTask(); void constructWrapCalcGradTask(); /* optimization */ virtual void optimize() override; /* Build the computational graph */ #ifdef IDEAPLACE_TASKFLOR_FOR_GRAD_OBJ_ void regCalcHpwlGradTaskFlow(tf::Taskflow &tfFlow); void regCalcOvlGradTaskFlow(tf::Taskflow &tfFlow); void regCalcOobGradTaskFlow(tf::Taskflow &tfFlow); void regCalcAsymGradTaskFlow(tf::Taskflow &tfFlow); void regCalcCosGradTaskFlow(tf::Taskflow &tfFlow); void regCalcAllGradTaskFlow(tf::Taskflow &tfFlow); void regCalcGradForLoop(tf::Taskflow &tfFlow); #endif protected: /* Optimization data */ EigenVector _grad; ///< The first 
order graident EigenVector _gradHpwl; ///< The first order gradient of hpwl objective EigenVector _gradOvl; ///< The first order gradient of overlapping objective EigenVector _gradOob; ///< The first order gradient of out of boundary objective EigenVector _gradAsym; ///< The first order gradient of asymmetry objective EigenVector _gradCos; ///< The first order gradient of cosine signal path objective EigenVector _gradPowerWl; EigenVector _gradCrf; /* Tasks */ // Calculate the partials std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_hpwl_type, EigenVector>>> _calcHpwlPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_ovl_type, EigenVector>>> _calcOvlPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_oob_type, EigenVector>>> _calcOobPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_asym_type, EigenVector>>> _calcAsymPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_cos_type, EigenVector>>> _calcCosPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_power_wl_type, EigenVector>>> _calcPowerWlPartialTasks; std::vector<nt::Task<nt::CalculateOperatorPartialTask<nlp_crf_type, EigenVector>>> _calcCrfPartialTasks; // Update the partials std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_hpwl_type, EigenVector>>> _updateHpwlPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_ovl_type, EigenVector>>> _updateOvlPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_oob_type, EigenVector>>> _updateOobPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_asym_type, EigenVector>>> _updateAsymPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_cos_type, EigenVector>>> _updateCosPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_power_wl_type, EigenVector>>> _updatePowerWlPartialTasks; std::vector<nt::Task<nt::UpdateGradientFromPartialTask<nlp_crf_type, 
EigenVector>>> _updateCrfPartialTasks; // Clear the gradient. Use to clear the _gradxxx records. Needs to call before updating the partials nt::Task<nt::FuncTask> _clearGradTask; //FIXME: not used right noe nt::Task<nt::FuncTask> _clearHpwlGradTask; nt::Task<nt::FuncTask> _clearOvlGradTask; nt::Task<nt::FuncTask> _clearOobGradTask; nt::Task<nt::FuncTask> _clearAsymGradTask; nt::Task<nt::FuncTask> _clearCosGradTask; nt::Task<nt::FuncTask> _clearPowerWlGradTask; nt::Task<nt::FuncTask> _clearCrfGradTask; // Sum the _grad from individual nt::Task<nt::FuncTask> _sumGradTask; nt::Task<nt::FuncTask> _sumHpwlGradTask; nt::Task<nt::FuncTask> _sumOvlGradTask; nt::Task<nt::FuncTask> _sumOobGradTask; nt::Task<nt::FuncTask> _sumAsymGradTask; nt::Task<nt::FuncTask> _sumCosGradTask; nt::Task<nt::FuncTask> _sumPowerWlTaskGradTask; nt::Task<nt::FuncTask> _sumCrfGradTask; // all the grads has been calculated but have not updated nt::Task<nt::FuncTask> _wrapCalcGradTask; ///< calculating the gradient and sum them /* run time */ std::unique_ptr<::klib::StopWatch> _calcGradStopWatch; std::unique_ptr<::klib::StopWatch> _optimizerKernelStopWatch; }; //// @brief some helper function for NlpGPlacerSecondOrder namespace _nlp_second_order_details { template<typename nlp_settings, BoolType is_diagonal> struct is_diagonal_select {}; template<typename nlp_settings> struct is_diagonal_select<nlp_settings, true> { typedef typename nlp_settings::nlp_types_type::EigenMatrix matrix_type; static void resize(matrix_type &matrix, IntType size) { matrix.resize(size, size); } static decltype(auto) inverse(matrix_type &matrix) { return matrix.diagonal().cwiseInverse().asDiagonal(); } }; template<typename nlp_settings> struct is_diagonal_select<nlp_settings, false> { typedef typename nlp_settings::nlp_types_type::EigenMatrix matrix_type; static void resize(matrix_type &matrix, IntType size) { matrix.resize(size, size); } static decltype(auto) inverse(matrix_type &matrix) { Assert(false); return 
matrix.diagonal().cwiseInverse(); } }; template<typename hessian_target_type> struct update_hessian { hessian_target_type &target; }; }; /// @brief first-order optimization template<typename nlp_settings> class NlpGPlacerSecondOrder : public NlpGPlacerFirstOrder<nlp_settings> { public: typedef typename NlpGPlacerFirstOrder<nlp_settings>::base_type base_type; typedef NlpGPlacerFirstOrder<nlp_settings> first_order_type; typedef typename first_order_type::nlp_numerical_type nlp_numerical_type; typedef typename first_order_type::nlp_coordinate_type nlp_coordinate_type; typedef typename nlp_settings::nlp_second_order_setting_type second_order_setting_type; typedef typename first_order_type::EigenMatrix EigenMatrix; typedef typename first_order_type::nlp_hpwl_type nlp_hpwl_type; typedef typename first_order_type::nlp_ovl_type nlp_ovl_type; typedef typename first_order_type::nlp_oob_type nlp_oob_type; typedef typename first_order_type::nlp_asym_type nlp_asym_type; typedef typename first_order_type::nlp_cos_type nlp_cos_type; typedef typename first_order_type::nlp_power_wl_type nlp_power_wl_type; typedef typename second_order_setting_type::hpwl_hessian_trait hpwl_hessian_trait; typedef typename second_order_setting_type::ovl_hessian_trait ovl_hessian_trait; typedef typename second_order_setting_type::oob_hessian_trait oob_hessian_trait; typedef typename second_order_setting_type::asym_hessian_trait asym_hessian_trait; typedef typename second_order_setting_type::cos_hessian_trait cos_hessian_trait; typedef typename second_order_setting_type::power_wl_hessian_trait power_wl_hessian_trait; /* figure out the types for storing the hessian */ // Determine whether the operators are return a diagonal hessian constexpr static BoolType isHpwlHessianDiagonal = diff::is_diagnol_matrix<hpwl_hessian_trait>::value; constexpr static BoolType isOvlHessianDiagonal = diff::is_diagnol_matrix<ovl_hessian_trait>::value; constexpr static BoolType isOobHessianDiagonal = 
diff::is_diagnol_matrix<oob_hessian_trait>::value; constexpr static BoolType isAsymHessianDiagonal = diff::is_diagnol_matrix<asym_hessian_trait>::value; constexpr static BoolType isCosHessianDiagonal = diff::is_diagnol_matrix<cos_hessian_trait>::value; constexpr static BoolType isPowerWlHessianDiagonal = diff::is_diagnol_matrix<power_wl_hessian_trait>::value; constexpr static BoolType isHessianDiagonal = isHpwlHessianDiagonal and isOvlHessianDiagonal and isOobHessianDiagonal and isAsymHessianDiagonal and isCosHessianDiagonal and isPowerWlHessianDiagonal; // define the supporting trait typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isHpwlHessianDiagonal> hpwl_hessian_diagonal_selector; friend hpwl_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isOvlHessianDiagonal> ovl_hessian_diagonal_selector; friend ovl_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isOobHessianDiagonal> oob_hessian_diagonal_selector; friend oob_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isAsymHessianDiagonal> asym_hessian_diagonal_selector; friend asym_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isCosHessianDiagonal> cos_hessian_diagonal_selector; friend cos_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isPowerWlHessianDiagonal> power_wl_hessian_diagonal_selector; friend cos_hessian_diagonal_selector; typedef _nlp_second_order_details::is_diagonal_select<nlp_settings, isHessianDiagonal> hessian_diagonal_selector; friend hessian_diagonal_selector; typedef typename hpwl_hessian_diagonal_selector::matrix_type hpwl_hessian_matrix; typedef typename ovl_hessian_diagonal_selector::matrix_type ovl_hessian_matrix; typedef typename oob_hessian_diagonal_selector::matrix_type oob_hessian_matrix; typedef typename 
asym_hessian_diagonal_selector::matrix_type asym_hessian_matrix; typedef typename cos_hessian_diagonal_selector::matrix_type cos_hessian_matrix; typedef typename power_wl_hessian_diagonal_selector::matrix_type power_wl_hessian_matrix; typedef typename hessian_diagonal_selector::matrix_type hessian_matrix; /* define the algorithms */ typedef typename nlp_settings::nlp_second_order_algorithms_type nlp_second_order_algorithms; typedef typename nlp_second_order_algorithms::converge_type converge_type; typedef typename nlp::converge::converge_criteria_trait<converge_type> converge_trait; typedef typename nlp_second_order_algorithms::optm_type optm_type; typedef typename nlp::optm::optm_trait<optm_type> optm_trait; friend converge_type; template<typename converge_criteria_type> friend struct nlp::converge::converge_criteria_trait; friend optm_type; friend optm_trait; typedef typename nlp_settings::nlp_second_order_algorithms_type::mult_init_type mult_init_type; typedef nlp::outer_multiplier::init::multiplier_init_trait<mult_init_type> mult_init_trait; friend mult_init_trait; typedef typename nlp_settings::nlp_second_order_algorithms_type::mult_update_type mult_update_type; typedef nlp::outer_multiplier::update::multiplier_update_trait<mult_update_type> mult_update_trait; friend mult_update_trait; typedef typename nlp_settings::nlp_second_order_algorithms_type::mult_type mult_type; typedef nlp::outer_multiplier::multiplier_trait<mult_type> mult_trait; friend mult_trait; typedef typename nlp_settings::nlp_second_order_algorithms_type::mult_adjust_type mult_adjust_type; typedef nlp::outer_multiplier::update::multiplier_update_trait<mult_adjust_type> mult_adjust_trait; friend mult_adjust_trait; /* updating alpha parameters */ typedef typename nlp_settings::nlp_second_order_algorithms_type::alpha_type alpha_type; typedef nlp::alpha::alpha_trait<alpha_type> alpha_trait; template<typename T> friend struct nlp::alpha::alpha_trait; typedef typename 
nlp_settings::nlp_second_order_algorithms_type::alpha_update_type alpha_update_type; typedef nlp::alpha::update::alpha_update_trait<alpha_update_type> alpha_update_trait; template<typename T> friend struct nlp::alpha::update::alpha_update_trait; static constexpr nlp_numerical_type hessianMinBound = 0.01; static constexpr nlp_numerical_type hessianMaxBound = 10; NlpGPlacerSecondOrder(Database &db) : NlpGPlacerFirstOrder<nlp_settings>(db) {} public: decltype(auto) inverseHessian() { return hessian_diagonal_selector::inverse(_hessian); } void calcHessian() { _clearHessian(); _calcAllHessians(); _updateAllHessian(); clipHessian(); } protected: virtual void initProblem() override { first_order_type::initProblem(); initSecondOrder(); } void initSecondOrder(); /* Construct tasks */ virtual void optimize() override { WATCH_QUICK_START(); // setting up the multipliers this->assignIoPins(); this->_wrapObjAllTask.run(); this->_wrapCalcGradTask.run(); calcHessian(); optm_type optm; mult_type multiplier = mult_trait::construct(*this); mult_trait::init(*this, multiplier); mult_trait::recordRaw(*this, multiplier); mult_adjust_type multAdjuster = mult_adjust_trait::construct(*this, multiplier); mult_adjust_trait::init(*this, multiplier, multAdjuster); alpha_type alpha = alpha_trait::construct(*this); alpha_trait::init(*this, alpha); alpha_update_type alphaUpdate = alpha_update_trait::construct(*this, alpha); alpha_update_trait::init(*this, alpha, alphaUpdate); DBG("np \n"); std::cout<<"nlp address " <<this <<std::endl; IntType iter = 0; do { std::string debugGdsFilename = "./debug/"; debugGdsFilename += "gp_iter_" + std::to_string(iter)+".gds"; DBG("iter %d \n", iter); optm_trait::optimize(*this, optm); mult_trait::update(*this, multiplier); mult_trait::recordRaw(*this, multiplier); mult_adjust_trait::update(*this, multiplier, multAdjuster); alpha_update_trait::update(*this, alpha, alphaUpdate); this->assignIoPins(); DBG("obj %f hpwl %f ovl %f oob %f asym %f cos %f \n", 
this->_obj, this->_objHpwl, this->_objOvl, this->_objOob, this->_objAsym, this->_objCos); ++iter; } while (not base_type::stop_condition_trait::stopPlaceCondition(*this, this->_stopCondition)); auto end = WATCH_QUICK_END(); //std::cout<<"grad"<<"\n"<< _grad <<std::endl; std::cout<<"time "<< end / 1000 <<" ms" <<std::endl; this->writeOut(); } private: void constructCalcHessianTasks() { using hpwl = nt::CalculateOperatorHessianTask<nlp_hpwl_type, hpwl_hessian_trait, EigenMatrix, hpwl_hessian_matrix>; using ovl = nt::CalculateOperatorHessianTask<nlp_ovl_type, ovl_hessian_trait, EigenMatrix, ovl_hessian_matrix>; using oob = nt::CalculateOperatorHessianTask<nlp_oob_type, oob_hessian_trait, EigenMatrix, oob_hessian_matrix>; using asym = nt::CalculateOperatorHessianTask<nlp_asym_type, asym_hessian_trait, EigenMatrix, asym_hessian_matrix>; using cos = nt::CalculateOperatorHessianTask<nlp_cos_type, cos_hessian_trait, EigenMatrix, cos_hessian_matrix>; using pwl = nt::CalculateOperatorHessianTask<nlp_power_wl_type, power_wl_hessian_trait, EigenMatrix, power_wl_hessian_matrix>; auto getIdxFunc = [&](IndexType cellIdx, Orient2DType orient) { return this->plIdx(cellIdx, orient); }; // wrapper the convert cell idx to pl idx for (IndexType i = 0; i < this->_hpwlOps.size(); ++i) { _calcHpwlHessianTasks.emplace_back(hpwl(&this->_hpwlOps[i], &_hessianHpwl, getIdxFunc)); } for (auto &op : this->_ovlOps) { _calcOvlHessianTasks.emplace_back(ovl(&op, &_hessianOvl, getIdxFunc)); } for (auto &op : this->_oobOps) { _calcOobHessianTasks.emplace_back(oob(&op, &_hessianOob, getIdxFunc)); } for (auto &op : this->_asymOps) { _calcAsymHessianTasks.emplace_back(asym(&op, &_hessianAsym, getIdxFunc)); } for (auto &op : this->_cosOps) { _calcCosHessianTasks.emplace_back(cos(&op, &_hessianCos, getIdxFunc)); } for (auto &op : this->_powerWlOps) { _calcPowerWlHessianTasks.emplace_back(pwl(&op, &_hessianPowerWl, getIdxFunc)); } } void _clearHessian() { _hessian.setZero(); _hessianHpwl.setZero(); 
_hessianOvl.setZero(); _hessianOob.setZero(); _hessianAsym.setZero(); _hessianCos.setZero(); _hessianPowerWl.setZero(); } void _calcAllHessians() { #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcHpwlHessianTasks.size(); ++i) { _calcHpwlHessianTasks[i].calc(); } #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcOvlHessianTasks.size(); ++i) { _calcOvlHessianTasks[i].calc(); } #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcOobHessianTasks.size(); ++i) { _calcOobHessianTasks[i].calc(); } #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcAsymHessianTasks.size(); ++i) { _calcAsymHessianTasks[i].calc(); } #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcCosHessianTasks.size(); ++i) { _calcCosHessianTasks[i].calc(); } #pragma omp parallel for schedule(static) for (IndexType i = 0; i < _calcPowerWlHessianTasks.size(); ++i) { _calcPowerWlHessianTasks[i].calc(); } } void _updateAllHessian() { #pragma omp parallel for schedule(static) for (IndexType i = 0; i < 6; ++i) { if (i == 0) { for (auto & calc : _calcHpwlHessianTasks) { calc.update(); } } else if (i == 1) { for (auto & calc : _calcOvlHessianTasks) { calc.update(); } } else if (i == 2) { for (auto & calc : _calcOobHessianTasks) { calc.update(); } } else if (i == 3) { for (auto & calc : _calcAsymHessianTasks) { calc.update(); } } else if (i == 4) { for (auto & calc : _calcCosHessianTasks) { calc.update(); } } else { for (auto & calc : _calcPowerWlHessianTasks) { calc.update(); } } } _hessian = _hessianHpwl + _hessianOvl + _hessianOob + _hessianAsym + _hessianCos + _hessianPowerWl; } void clipHessian() { _hessian = _hessian.cwiseMin(hessianMinBound).cwiseMax(hessianMaxBound); } virtual void constructTasks() override { first_order_type::constructTasks(); this->constructCalcHessianTasks(); } protected: hessian_matrix _hessian; ///< The hessian for the objective function hpwl_hessian_matrix _hessianHpwl; 
///< The hessian for the hpwl function ovl_hessian_matrix _hessianOvl; ///< The hessian for the overlapping function oob_hessian_matrix _hessianOob; ///< The hessian for the out of boundary function asym_hessian_matrix _hessianAsym; ///< The hessian for the asymmetry function cos_hessian_matrix _hessianCos; ///< The hessian for the signal path function power_wl_hessian_matrix _hessianPowerWl; /* Tasks */ std::vector<nt::CalculateOperatorHessianTask<nlp_hpwl_type, hpwl_hessian_trait, EigenMatrix, hpwl_hessian_matrix>> _calcHpwlHessianTasks; ///< calculate and update the hessian std::vector<nt::CalculateOperatorHessianTask<nlp_ovl_type, ovl_hessian_trait, EigenMatrix, ovl_hessian_matrix>> _calcOvlHessianTasks; ///< calculate and update the hessian std::vector<nt::CalculateOperatorHessianTask<nlp_oob_type, oob_hessian_trait, EigenMatrix, oob_hessian_matrix>> _calcOobHessianTasks; ///< calculate and update the hessian std::vector<nt::CalculateOperatorHessianTask<nlp_asym_type, asym_hessian_trait, EigenMatrix, asym_hessian_matrix>> _calcAsymHessianTasks; ///< calculate and update the hessian std::vector<nt::CalculateOperatorHessianTask<nlp_cos_type, cos_hessian_trait, EigenMatrix, cos_hessian_matrix>> _calcCosHessianTasks; ///< calculate and update the hessian std::vector<nt::CalculateOperatorHessianTask<nlp_power_wl_type, power_wl_hessian_trait, EigenMatrix, power_wl_hessian_matrix>> _calcPowerWlHessianTasks; ///< calculate and update the hessian }; template<typename nlp_settings> inline void NlpGPlacerSecondOrder<nlp_settings>::initSecondOrder() { const IntType size = this->_numVariables; DBG("resize hessian to %d \n", size); hpwl_hessian_diagonal_selector::resize(_hessianHpwl, size); ovl_hessian_diagonal_selector::resize(_hessianOvl, size); oob_hessian_diagonal_selector::resize(_hessianOob, size); asym_hessian_diagonal_selector::resize(_hessianAsym, size); cos_hessian_diagonal_selector::resize(_hessianCos, size); 
power_wl_hessian_diagonal_selector::resize(_hessianPowerWl , size); } PROJECT_NAMESPACE_END #endif //IDEAPLACE_NLPGPLACER_H_
mhpTest1.c
/*
 * Presumably a fixture for a may-happen-in-parallel (MHP) / race-detection
 * analysis (the surrounding label names it mhpTest1.c) — the code below is
 * kept exactly as-is, including its intentional-looking hazards.
 */
int main() {
    int y; /* declared before the parallel region, so shared by all threads */
#pragma omp parallel
    {
        int x = 1; /* declared inside the region, so private to each thread */
        if (x > 0) { /* always true here (x == 1), so the else branch is dead at runtime */
            x = x + 1;
#pragma omp barrier
            /* NOTE(review): every thread writes shared y with no
             * synchronization after the barrier — a write-write data race
             * (same value stored, but still a race under the OpenMP memory
             * model). Likely the condition this fixture exists to expose. */
            y = 6 + 3;
        } else {
            x = x + 2;
            /* NOTE(review): this is a *different* barrier region from the one
             * in the if-branch; if threads could diverge, having each branch
             * hit its own barrier would be non-conforming OpenMP (all threads
             * of a team must encounter the same barrier). Unreachable here
             * because x is always 1. */
#pragma omp barrier
            y = x + 4; /* racy write to shared y, reads thread-private x */
        }
    }
}
setsketch.h
#ifndef EHLL_H__ #define EHLL_H__ #include <stdexcept> #include <cassert> #include "aesctr/wy.h" #include <queue> #include "sketch/div.h" #include <unordered_map> #include <memory> #include "fy.h" #include "sketch/count_eq.h" #include "sketch/macros.h" #include "sketch/hash.h" #include "xxHash/xxh3.h" #include "flat_hash_map/flat_hash_map.hpp" namespace sketch { namespace setsketch { namespace detail { struct Deleter { template<typename T> void operator()(const T *x) const {std::free(const_cast<T *>(x));} }; template <class F, class T> std::tuple<T, T, uint64_t> brent_find_minima(const F &f, T min, T max, int bits=std::numeric_limits<T>::digits, uint64_t max_iter=std::numeric_limits<uint64_t>::max()) noexcept { T x, w, v, u, delta, delta2, fu, fv, fw, fx, mid, fract1, fract2; const T tolerance = static_cast<T>(std::ldexp(1.0, 1-bits)); static constexpr T golden = 0.3819660; // golden ratio, don't need too much precision here! x = w = v = max; fw = fv = fx = f(x); delta2 = delta = 0; uint64_t count = max_iter; do { mid = (min + max) / 2; fract1 = tolerance * std::abs(x) + tolerance / 4; fract2 = 2 * fract1; if(std::abs(x - mid) <= (fract2 - (max - min) / 2)) break; if(std::abs(delta2) > fract1) { T r = (x - w) * (fx - fv); T q = (x - v) * (fx - fw); T p = (x - v) * q - (x - w) * r; q = 2 * (q - r); if(q > 0) p = -p; else q = -q; T td = delta2; delta2 = delta; if((std::abs(p) >= std::abs(q * td / 2)) || (p <= q * (min - x)) || (p >= q * (max - x))) { delta2 = (x >= mid) ? min - x : max - x; delta = golden * delta2; } else { delta = p / q; u = x + delta; if(((u - min) < fract2) || ((max- u) < fract2)) delta = (mid - x) < 0 ? (T)-std::abs(fract1) : (T)std::abs(fract1); } } else { delta2 = (x >= mid) ? min - x : max - x; delta = golden * delta2; } u = (std::abs(delta) >= fract1) ? T(x + delta) : (delta > 0 ? 
T(x + std::abs(fract1)) : T(x - std::abs(fract1))); fu = f(u); if(fu <= fx) { if(u >= x) min = x; else max = x; v = w;w = x; x = u; fv = fw; fw = fx; fx = fu; } else { // Oh dear, point u is worse than what we have already, // even so it *must* be better than one of our endpoints: if(u < x) min = u; else max = u; if((fu <= fw) || (w == x)) v = w, w = u, fv = fw, fw = fu; else if((fu <= fv) || (v == x) || (v == w)) v = u, fv = fu; } } while(--count); return std::make_tuple(x, fx, max_iter - count); } } template<typename FT> static inline FT jmle_simple(const uint64_t lhgt, const uint64_t rhgt, const size_t m, const FT lhest, const FT rhest, FT base) { if(!lhest && !rhest) return FT(0.); const uint64_t neq = m - (lhgt + rhgt); const FT sumest = lhest + rhest; const long double bi = 1.L / base; const long double lbase = std::log(static_cast<long double>(base)), lbi = 1. / lbase; //const long double lbdb = base - 1. ? std::log1p(base - 1.L) / (base - 1.L): 1.L; const FT z = (1.L - bi) / (sumest); auto func = [neq,lhgt,rhgt,lbi,z,rhest,lhest](auto jaccard) { FT lhs = neq || lhgt ? FT(lbi * std::log1p((rhest * jaccard - lhest) * z)): FT(0); FT rhs = neq || rhgt ? 
FT(lbi * std::log1p((lhest * jaccard - rhest) * z)): FT(0); FT ret = 0; if(neq) ret += neq * std::log1p(lhs + rhs); if(lhgt) ret += lhgt * std::log(-lhs); if(rhgt) ret += rhgt * std::log(-rhs); if(std::isnan(ret)) return std::numeric_limits<FT>::max(); return -ret; }; return std::get<0>(detail::brent_find_minima(func, FT(0), std::min(lhest, rhest) / std::max(lhest, rhest), 24)); } #if __cplusplus >= 201703L static constexpr double INVMUL64 = 0x1p-64; #else static constexpr double INVMUL64 = 5.42101086242752217e-20; #endif // Implementations of set sketch template<typename FT> class mvt_t { FT mv_; FT *data_ = nullptr; size_t m_; public: mvt_t(size_t m, FT mv = std::numeric_limits<FT>::max()): mv_(m), m_(m) {} FT mv() const {return mv_;} FT *data() {return data_;} const FT *data() const {return data_;} // Check size and max size_t getm() const {return m_;} size_t nelem() const {return 2 * m_ - 1;} FT operator[](size_t i) const {return data_[i];} void assign(FT *vals, size_t nvals, FT mv) { mv_ = mv; assign(vals, nvals); } void assign(FT *vals, size_t nvals) { data_ = vals; m_ = nvals; std::fill(data_, data_ + nelem(), mv_); } FT max() const { return data_[nelem() - 1]; } FT klow() const { return max(); } bool update(size_t index, FT x) { const auto sz = nelem(); if(x < data_[index]) { for(;;) { data_[index] = x; if((index = m_ + (index >> 1)) >= sz) break; const size_t lhi = (index - m_) << 1, rhi = lhi + 1; x = std::max(data_[lhi], data_[rhi]); if(x >= data_[index]) break; } assert(max() == *std::max_element(data_, data_ + m_)); return true; } return false; } }; template<typename ResT> struct minvt_t { static constexpr ResT minv_ = 0; ResT *data_ = nullptr; size_t m_; long double b_ = -1., explim_ = -1.; minvt_t(size_t m): m_(m) {} double explim() const {return explim_;} ResT *data() {return data_;} const ResT *data() const {return data_;} // Check size and max size_t getm() const {return m_;} ResT operator[](size_t i) const {return data_[i];} void assign(ResT 
*vals, size_t nvals, double b) { data_ = vals; m_ = nvals; b_ = b; std::fill(data_, data_ + (m_ << 1) - 1, minv_); explim_ = std::pow(b_, -min()); } typename std::ptrdiff_t min() const { return data_[(m_ << 1) - 2]; } typename std::ptrdiff_t klow() const { return min(); } typename std::ptrdiff_t max() const {return *std::max_element(data_, &data_[(m_ << 1) - 1]);} bool update(size_t index, ResT x) { const auto sz = (m_ << 1) - 1; if(x > data_[index]) { for(;;) { data_[index] = x; if((index = m_ + (index >> 1)) >= sz) break; const size_t lhi = (index - m_) << 1, rhi = lhi + 1; x = std::min(data_[lhi], data_[rhi]); if(x <= data_[index]) break; } explim_ = std::pow(b_, -min()); assert(min() == *std::min_element(data_, data_ + m_)); return true; } return false; } }; template<typename ResT> struct LowKHelper { ResT *vals_; uint64_t natval_, nvals_; double b_ = -1.; double explim_; int klow_ = 0; LowKHelper(size_t m): nvals_(m) {} void assign(ResT *vals, size_t nvals, double b) { vals_ = vals; nvals_ = nvals; b_ = b; reset(); } int klow() const {return klow_;} auto max() const {return *std::max_element(vals_, vals_ + nvals_);} double explim() const {return explim_;} void reset() { klow_ = *std::min_element(vals_, vals_ + nvals_); size_t i; for(i = natval_ = 0; i < nvals_; ++i) natval_ += (vals_[i] == klow_); explim_ = std::pow(b_, -klow_); } bool update(size_t idx, ResT k) { if(k > vals_[idx]) { auto oldv = vals_[idx]; vals_[idx] = k; remove(oldv); return true; } return false; } void remove(int kval) { if(kval == klow_) { if(--natval_ == 0) reset(); } } }; #if __AVX2__ INLINE float broadcast_reduce_sum(__m256 x) { const __m256 permHalves = _mm256_permute2f128_ps(x, x, 1); const __m256 m0 = _mm256_add_ps(permHalves, x); const __m256 perm0 = _mm256_permute_ps(m0, 0b01001110); const __m256 m1 = _mm256_add_ps(m0, perm0); const __m256 perm1 = _mm256_permute_ps(m1, 0b10110001); const __m256 m2 = _mm256_add_ps(perm1, m1); return m2[0]; } INLINE double 
broadcast_reduce_sum(__m256d x) { __m256d m1 = _mm256_add_pd(x, _mm256_permute2f128_pd(x, x, 1)); return _mm256_add_pd(m1, _mm256_permute_pd(m1, 5))[0]; } #endif static inline long double g_b(long double b, long double arg) { return (1.L - std::pow(b, -arg)) / (1.L - 1.L / b); } template<typename ResT, typename FT=double> class SetSketch; // Forward template<typename FT=double, bool FLOGFILTER=true> class CSetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); // SetSketch 1 size_t m_; // Number of registers std::unique_ptr<FT[], detail::Deleter> data_; fy::LazyShuffler ls_; mvt_t<FT> mvt_; std::vector<uint64_t> ids_; std::vector<uint32_t> idcounts_; uint64_t total_updates_ = 0; mutable double mycard_ = -1.; static FT *allocate(size_t n) { n = (n << 1) - 1; FT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif if(posix_memalign((void **)&ret, ALN, n * sizeof(FT))) throw std::bad_alloc(); return ret; } FT getbeta(size_t idx) const { return FT(1.) / static_cast<FT>(m_ - idx); } public: const FT *data() const {return data_.get();} FT *data() {return data_.get();} CSetSketch(size_t m, bool track_ids=false, bool track_counts=false, FT maxv=std::numeric_limits<FT>::max()): m_(m), ls_(m_), mvt_(m_) { data_.reset(allocate(m_)); mvt_.assign(data_.get(), m_, maxv); if(track_ids || track_counts) ids_.resize(m_); if(track_counts) idcounts_.resize(m_); //generate_betas(); } CSetSketch(const CSetSketch &o): m_(o.m_), data_(allocate(o.m_)), ls_(m_), mvt_(m_, o.mvt_.mv()), ids_(o.ids_), idcounts_(o.idcounts_) { mvt_.assign(data_.get(), m_, o.mvt_.mv()); std::copy(o.data_.get(), &o.data_[2 * m_ - 1], data_.get()); //generate_betas(); } template<typename ResT=uint16_t> SetSketch<ResT, FT> to_setsketch(double b, double a, int64_t q=std::numeric_limits<ResT>::max() - 1) const { SetSketch<ResT, FT> ret(m_, b, a, q, ids_.size()); const double logbinv = 1. 
/ std::log1p(b - 1.); for(size_t i = 0; i < m_; ++i) { ret.lowkh().update(i, std::max(int64_t(0), std::min(int64_t(q) + 1, static_cast<int64_t>((1. - std::log(data_[i] / a) * logbinv))))); } return ret; } CSetSketch &operator=(const CSetSketch &o) { if(size() != o.size()) { if(m_ < o.m_) data_.reset(allocate(o.m_)); m_ = o.m_; ls_.resize(m_); //generate_betas(); } mvt_.assign(data_.get(), m_, o.mvt_.mv()); std::copy(o.data(), o.data() + (2 * m_ - 1), data()); if(o.ids_.size()) { ids_ = o.ids_; if(o.idcounts_.size()) idcounts_ = o.idcounts_; } total_updates_ = o.total_updates_; return *this; } CSetSketch(std::FILE *fp): ls_(1), mvt_(1) {read(fp);} CSetSketch(gzFile fp): ls_(1), mvt_(1) {read(fp);} CSetSketch(const std::string &s): ls_(1), mvt_(1) { read(s); } CSetSketch<FT> clone_like() const { return CSetSketch(m_, !ids().empty(), !idcounts().empty()); } FT min() const {return *std::min_element(data(), data() + m_);} FT max() const {return mvt_.max();} size_t size() const {return m_;} FT &operator[](size_t i) {return data_[i];} const FT &operator[](size_t i) const {return data_[i];} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} size_t total_updates() const {return total_updates_;} long double flog(long double x) const { __uint128_t yi; std::memcpy(&yi, &x, sizeof(x)); return yi * 3.7575583950764744255e-20L - 11356.176832703863597L; } double flog(double x) const { uint64_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 1.539095918623324e-16 - 709.0895657128241; } float flog(float x) const { uint32_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 8.2629582881927490e-8f - 88.02969186f; } void update(const uint64_t id) { mycard_ = -1.; ++total_updates_; uint64_t hid = id; uint64_t rv = wy::wyhash64_stateless(&hid); FT ev; FT mv = max(); CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; const FT bv = -1. 
/ m_; lrv |= wy::wyhash64_stateless(&rv); FT tv = static_cast<long double>((lrv >> 32) * 1.2621774483536188887e-29L); ev = bv * std::log(tv); if(ev > mv) return; } else { auto tv = rv * INVMUL64; const FT bv = -1. / m_; // Filter with fast log first CONST_IF(FLOGFILTER) { if(bv * flog(tv) * FT(.7) > mv) return; } ev = bv * std::log(tv); if(ev > mv) return; } ls_.reset(); ls_.seed(rv); uint64_t bi = 1; uint32_t idx = ls_.step(); for(;;) { if(mvt_.update(idx, ev)) { if(!ids_.empty()) { ids_.operator[](idx) = id; if(!idcounts_.empty()) idcounts_.operator[](idx) = 1; } mv = max(); } else if(!idcounts_.empty()) { if(id == ids_.operator[](idx)) ++idcounts_.operator[](idx); } if(bi == m_) return; rv = wy::wyhash64_stateless(&hid); const FT bv = -getbeta(bi++); CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; lrv |= wy::wyhash64_stateless(&rv); ev = std::fma(bv, std::log((lrv >> 32) * 1.2621774483536188887e-29L), ev); if(ev > mv) break; } else { const FT nv = rv * INVMUL64; CONST_IF(FLOGFILTER) { if(bv * flog(nv) * FT(.7) + ev >= mv) break; } ev = std::fma(bv, std::log(nv), ev); if(ev > mv) break; } idx = ls_.step(); } } bool operator==(const CSetSketch<FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const CSetSketch<FT> &o) const { return m_ == o.m_ && (ids().empty() == o.ids().empty()) && (idcounts().empty() == o.idcounts().empty()); } void merge(const CSetSketch<FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); if(ids().empty()) { std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::min(x, y);}); } else { for(size_t i = 0; i < size(); ++i) { if(!idcounts_.empty() && !ids_.empty() && ids_[i] == o.ids_[i]) { idcounts_[i] += o.idcounts_[i]; } else if(mvt_.update(i, o.data_[i])) { if(!ids_.empty()) ids_[i] = o.ids_[i]; if(!idcounts_.empty()) idcounts_[i] = o.idcounts_[i]; } } } total_updates_ += o.total_updates_; mycard_ = 
-1.; } CSetSketch &operator+=(const CSetSketch<FT> &o) {merge(o); return *this;} CSetSketch operator+(const CSetSketch<FT> &o) const { CSetSketch ret(*this); ret += o; return ret; } double jaccard_index(const CSetSketch<FT> &o) const { return shared_registers(o) / double(m_); } size_t shared_registers(const CSetSketch<FT> &o) const { CONST_IF(sizeof(FT) == 4) { return eq::count_eq((uint32_t *)data(), (uint32_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 8) { return eq::count_eq((uint64_t *)data(), (uint64_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 2) { return eq::count_eq((uint16_t *)data(), (uint16_t *)o.data(), m_); } auto optr = o.data(); return std::accumulate(data(), data() + m_, size_t(0), [&optr](size_t nshared, FT x) { return nshared + (x == *optr++); }); } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); FT mv; gzread(fp, &mv, sizeof(mv)); data_.reset(allocate(m_)); mvt_.assign(data_.get(), m_, mv); gzread(fp, (void *)data_.get(), m_ * sizeof(FT)); for(size_t i = 0;i < m_; ++i) mvt_.update(i, data_[i]); ls_.resize(m_); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); FT m = mvt_.mv(); checkwrite(fp, (const void *)&m, sizeof(m)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void write(gzFile fp) 
const { checkwrite(fp, (const void *)&m_, sizeof(m_)); FT m = mvt_.mv(); checkwrite(fp, (const void *)&m, sizeof(m)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void reset() {clear();} void clear() { mvt_.assign(data_.get(), m_, mvt_.mv()); total_updates_ = 0; if(ids_.size()) { std::fill(ids_.begin(), ids_.end(), uint64_t(0)); if(idcounts_.size()) std::fill(idcounts_.begin(), idcounts_.end(), uint32_t(0)); } mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} const std::vector<uint32_t> &idcounts() const {return idcounts_;} double union_size(const CSetSketch<FT> &o) const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += std::min(data_[i], o.data_[i]); return m_ / s; } auto alpha_beta(const CSetSketch<FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); return std::pair<double, double>{double(gtlt.first) / m_, double(gtlt.second) / m_}; } static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double intersection_size(const CSetSketch<FT> &o) const { auto triple = alpha_beta_mu(o); return std::max(1. - (std::get<0>(triple) + std::get<1>(triple)), 0.) * std::get<2>(triple); } std::tuple<double, double, double> alpha_beta_mu(const CSetSketch<FT> &o) const { const auto ab = alpha_beta(o); auto mycard = getcard(), ocard = o.getcard(); if(ab.first + ab.second >= 1.) 
// They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {ab.first, ab.second, __union_card(ab.first, ab.second, mycard, ocard)}; } double cardinality_estimate() const {return cardinality();} double cardinality() const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += data_[i]; return m_ / s; } static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg, size_t q) { long double b = std::exp(std::log((long double)maxreg / (long double)minreg) / (long double)q); return {FT(b), FT((long double)maxreg / b)}; } template<typename ResT=uint16_t> static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg) { if(maxreg < minreg) std::swap(maxreg, minreg); return optimal_parameters(maxreg, minreg, std::numeric_limits<ResT>::max()); } double containment_index(const CSetSketch<FT> &o) const { auto abm = alpha_beta_mu(o); auto lho = std::get<0>(abm); auto isf = std::max(1. 
- (lho + std::get<1>(abm)), 0.); return isf / (lho + isf); } }; template<typename FT=double, bool FLOGFILTER=true> class OPCSetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); // SetSketch 1 size_t m_; // Number of registers std::unique_ptr<FT[], detail::Deleter> data_; schism::Schismatic<uint32_t> div_; std::vector<uint64_t> ids_; std::vector<uint32_t> idcounts_; uint64_t total_updates_ = 0; mutable double mycard_ = -1.; static FT *allocate(size_t n) { FT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif if(posix_memalign((void **)&ret, ALN, n * sizeof(FT))) throw std::bad_alloc(); return ret; } public: const FT *data() const {return data_.get();} FT *data() {return data_.get();} OPCSetSketch(size_t m, bool track_ids=false, bool track_counts=false, FT maxv=std::numeric_limits<FT>::max()): m_(m), div_(m_) { data_.reset(allocate(m_)); std::fill(data_.get(), &data_[m_], maxv); if(track_ids || track_counts) ids_.resize(m_); if(track_counts && !track_ids) { std::fprintf(stderr, "track_counts implies track_ids, enabling both\n"); track_ids = true; } if(track_counts) idcounts_.resize(m_); //generate_betas(); } OPCSetSketch(const OPCSetSketch &o): m_(o.m_), data_(allocate(o.m_)), div_(m_), ids_(o.ids_), idcounts_(o.idcounts_) { std::copy(o.data_[0], &o.data_[m_], data_.get()); //generate_betas(); } template<typename ResT=uint16_t> SetSketch<ResT, FT> to_setsketch(double b, double a, int64_t q=std::numeric_limits<ResT>::max() - 1) const { SetSketch<ResT, FT> ret(m_, b, a, q, ids_.size()); const double logbinv = 1. / std::log1p(b - 1.); for(size_t i = 0; i < m_; ++i) { ret.lowkh().update(i, std::max(int64_t(0), std::min(int64_t(q) + 1, static_cast<int64_t>((1. 
- std::log(data_[i] / a) * logbinv))))); } return ret; } OPCSetSketch &operator=(const OPCSetSketch &o) { if(size() != o.size()) { if(m_ < o.m_) data_.reset(allocate(o.m_)); m_ = o.m_; } std::copy(o.data(), &o.data()[m_], data()); if(o.ids_.size()) { ids_ = o.ids_; if(o.idcounts_.size()) idcounts_ = o.idcounts_; } total_updates_ = o.total_updates_; return *this; } OPCSetSketch(std::FILE *fp): div_(1) {read(fp);} OPCSetSketch(gzFile fp): div_(1) {read(fp);} OPCSetSketch(const std::string &s): div_(1) { read(s); } OPCSetSketch<FT> clone_like() const { return OPCSetSketch(m_, !ids().empty(), !idcounts().empty()); } FT min() const {return *std::min_element(data(), data() + m_);} FT max() const {return *std::max_element(data(), data() + m_);} size_t size() const {return m_;} FT &operator[](size_t i) {return data_[i];} const FT &operator[](size_t i) const {return data_[i];} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} size_t total_updates() const {return total_updates_;} long double flog(long double x) const { __uint128_t yi; std::memcpy(&yi, &x, sizeof(x)); return yi * 3.7575583950764744255e-20L - 11356.176832703863597L; } double flog(double x) const { uint64_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 1.539095918623324e-16 - 709.0895657128241; } float flog(float x) const { uint32_t yi; std::memcpy(&yi, &x, sizeof(yi)); return yi * 8.2629582881927490e-8f - 88.02969186f; } bool update(const uint64_t id) { mycard_ = -1.; ++total_updates_; uint64_t hid = id; uint64_t rv = wy::wyhash64_stateless(&hid); FT ev; CONST_IF(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; const FT bv = -1. / m_; lrv |= wy::wyhash64_stateless(&rv); FT tv = static_cast<long double>((lrv >> 32) * 1.2621774483536188887e-29L); ev = bv * std::log(tv); } else { auto tv = rv * INVMUL64; const FT bv = -1. 
/ m_; // Filter with fast log first ev = bv * std::log(tv); } auto idx = div_.mod(rv); if(data_[idx] > ev) { data_[idx] = ev; if(!ids_.empty()) { ids_[idx] = id; if(!idcounts_.empty()) idcounts_[idx] = 1; } return true; } else if(data_[idx] == ev && !ids_.empty() && ids_[idx] == id && !idcounts_.empty()) ++idcounts_[idx]; return false; } bool operator==(const OPCSetSketch<FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const OPCSetSketch<FT> &o) const { return m_ == o.m_ && (ids().empty() == o.ids().empty()) && (idcounts().empty() == o.idcounts().empty()); } void merge(const OPCSetSketch<FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); if(ids().empty()) { std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::min(x, y);}); } else { for(size_t i = 0; i < size(); ++i) { if(!idcounts_.empty() && !ids_.empty() && ids_[i] == o.ids_[i]) { idcounts_[i] += o.idcounts_[i]; } else if(data_[i] < o.data_[i]) { data_[i] = o.data_[i]; if(!ids_.empty()) ids_[i] = o.ids_[i]; if(!idcounts_.empty()) idcounts_[i] = o.idcounts_[i]; } } } total_updates_ += o.total_updates_; mycard_ = -1.; } OPCSetSketch &operator+=(const OPCSetSketch<FT> &o) {merge(o); return *this;} OPCSetSketch operator+(const OPCSetSketch<FT> &o) const { OPCSetSketch ret(*this); ret += o; return ret; } double jaccard_index(const OPCSetSketch<FT> &o) const { return shared_registers(o) / double(m_); } size_t shared_registers(const OPCSetSketch<FT> &o) const { CONST_IF(sizeof(FT) == 4) { return eq::count_eq((uint32_t *)data(), (uint32_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 8) { return eq::count_eq((uint64_t *)data(), (uint64_t *)o.data(), m_); } else CONST_IF(sizeof(FT) == 2) { return eq::count_eq((uint16_t *)data(), (uint16_t *)o.data(), m_); } auto optr = o.data(); return std::accumulate(data(), data() + m_, size_t(0), [&optr](size_t nshared, FT x) { return nshared + (x 
== *optr++); }); } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); data_.reset(allocate(m_)); div_ = schism::Schismatic<uint32_t>(m_); gzread(fp, (void *)data_.get(), m_ * sizeof(FT)); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void write(gzFile fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(FT)); } void reset() {clear();} void clear() { std::fill_n(data_.get(), m_, std::numeric_limits<FT>::max()); total_updates_ = 0; if(ids_.size()) { std::fill(ids_.begin(), ids_.end(), uint64_t(0)); if(idcounts_.size()) std::fill(idcounts_.begin(), idcounts_.end(), uint32_t(0)); } mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} const std::vector<uint32_t> &idcounts() const {return idcounts_;} double union_size(const OPCSetSketch<FT> &o) const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += std::min(data_[i], o.data_[i]); return m_ / s; } auto alpha_beta(const OPCSetSketch<FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); return std::pair<double, double>{double(gtlt.first) / m_, double(gtlt.second) / m_}; 
} static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double intersection_size(const OPCSetSketch<FT> &o) const { auto triple = alpha_beta_mu(o); return std::max(1. - (std::get<0>(triple) + std::get<1>(triple)), 0.) * std::get<2>(triple); } std::tuple<double, double, double> alpha_beta_mu(const OPCSetSketch<FT> &o) const { const auto ab = alpha_beta(o); auto mycard = getcard(), ocard = o.getcard(); if(ab.first + ab.second >= 1.) // They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {ab.first, ab.second, __union_card(ab.first, ab.second, mycard, ocard)}; } double cardinality_estimate() const {return cardinality();} double cardinality() const { double s = 0.; #if _OPENMP >= 201307L #pragma omp simd reduction(+:s) #endif for(size_t i = 0; i < m_; ++i) s += data_[i]; return m_ / s; } static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg, size_t q) { long double b = std::exp(std::log((long double)maxreg / (long double)minreg) / (long double)q); return {FT(b), FT((long double)maxreg / b)}; } template<typename ResT=uint16_t> static std::pair<long double, long double> optimal_parameters(FT maxreg, FT minreg) { if(maxreg < minreg) std::swap(maxreg, minreg); return optimal_parameters(maxreg, minreg, std::numeric_limits<ResT>::max()); } double containment_index(const OPCSetSketch<FT> &o) const { auto abm = alpha_beta_mu(o); auto lho = std::get<0>(abm); auto isf = std::max(1. 
- (lho + std::get<1>(abm)), 0.); return isf / (lho + isf); } }; template<typename FT> static inline double intersection_size(const OPCSetSketch<FT> &lhs, const OPCSetSketch<FT> &rhs) { return lhs.intersection_size(rhs); } template<typename FT> static inline double intersection_size(const CSetSketch<FT> &lhs, const CSetSketch<FT> &rhs) { return lhs.intersection_size(rhs); } template<typename ResT, typename FT> class SetSketch { static_assert(std::is_floating_point<FT>::value, "Must float"); static_assert(std::is_integral<ResT>::value, "Must be integral"); // Set sketch 1 size_t m_; // Number of registers FT a_; // Exponential parameter FT b_; // Base FT ainv_; FT logbinv_; using QType = std::common_type_t<ResT, int>; QType q_; std::unique_ptr<ResT[], detail::Deleter> data_; std::vector<uint64_t> ids_; // The IDs representing the sampled items. // Only used if SetSketch is fy::LazyShuffler ls_; minvt_t<ResT> lowkh_; std::vector<FT> lbetas_; // Cache Beta values * 1. / a mutable double mycard_ = -1.; static ResT *allocate(size_t n) { n = (n << 1) - 1; ResT *ret = nullptr; static constexpr size_t ALN = #if __AVX512F__ 64; #elif __AVX2__ 32; #else 16; #endif #if __cplusplus >= 201703L && defined(_GLIBCXX_HAVE_ALIGNED_ALLOC) if((ret = static_cast<ResT *>(std::aligned_alloc(ALN, n * sizeof(ResT)))) == nullptr) #else if(posix_memalign((void **)&ret, ALN, n * sizeof(ResT))) #endif throw std::bad_alloc(); return ret; } FT getbeta(size_t idx) const { return FT(1.) / (m_ - idx); } public: const ResT *data() const {return data_.get();} ResT *data() {return data_.get();} auto &lowkh() {return lowkh_;} const auto &lowkh() const {return lowkh_;} SetSketch(size_t m, FT b, FT a, int q, bool track_ids = false): m_(m), a_(a), b_(b), ainv_(1./ a), logbinv_(1. 
/ std::log1p(b_ - 1.)), q_(q), ls_(m_), lowkh_(m) { ResT *p = allocate(m_); data_.reset(p); std::fill(p, p + m_, static_cast<ResT>(0)); lowkh_.assign(p, m_, b_); if(track_ids) ids_.resize(m_); lbetas_.resize(m_); for(size_t i = 0; i < m_; ++i) { lbetas_[i] = -ainv_ / (m_ - i); } } SetSketch(const SetSketch &o): m_(o.m_), a_(o.a_), b_(o.b_), ainv_(o.ainv_), logbinv_(o.logbinv_), q_(o.q_), ls_(m_), lowkh_(m_), lbetas_(o.lbetas_) { ResT *p = allocate(m_); data_.reset(p); lowkh_.assign(p, m_, b_); std::copy(o.data_.get(), &o.data_[2 * m_ - 1], p); } SetSketch(SetSketch &&o) = default; SetSketch(const std::string &s): ls_(1), lowkh_(1) { read(s); } size_t size() const {return m_;} double b() const {return b_;} double a() const {return a_;} ResT &operator[](size_t i) {return data_[i];} const ResT &operator[](size_t i) const {return data_[i];} int klow() const {return lowkh_.klow();} auto max() const {return lowkh_.max();} auto min() const {return lowkh_.min();} void addh(uint64_t id) {update(id);} void add(uint64_t id) {update(id);} void print() const { std::fprintf(stderr, "%zu = m, a %lg, b %lg, q %d\n", m_, double(a_), double(b_), int(q_)); } void update(const uint64_t id) { mycard_ = -1.; uint64_t hid = id; size_t bi = 0; uint64_t rv = wy::wyhash64_stateless(&hid); double ev = 0.; ls_.reset(); ls_.seed(rv); for(;;) { const auto ba = lbetas_[bi]; if(sizeof(FT) > 8) { auto lrv = __uint128_t(rv) << 64; lrv |= wy::wyhash64_stateless(&rv); ev += ba * std::log((lrv >> 32) * 1.2621774483536188887e-29L); } else { ev += ba * std::log(rv * INVMUL64); } if(ev > lowkh_.explim()) return; const QType k = std::max(0, std::min(q_ + 1, static_cast<QType>((1. 
- std::log(ev) * logbinv_)))); if(k <= klow()) return; auto idx = ls_.step(); if(lowkh_.update(idx, k)) { if(!ids_.empty()) { ids_[idx] = id; } } if(++bi == m_) return; rv = wy::wyhash64_stateless(&hid); } } bool operator==(const SetSketch<ResT, FT> &o) const { return same_params(o) && std::equal(data(), data() + m_, o.data()); } bool same_params(const SetSketch<ResT,FT> &o) const { return std::tie(b_, a_, m_, q_) == std::tie(o.b_, o.a_, o.m_, o.q_); } double harmean(const SetSketch<ResT, FT> *ptr=static_cast<const SetSketch<ResT, FT> *>(nullptr)) const { static std::unordered_map<FT, std::vector<FT>> powers; auto it = powers.find(b_); if(it == powers.end()) { it = powers.emplace(b_, std::vector<FT>()).first; it->second.resize(q_ + 2); for(size_t i = 0; i < it->second.size(); ++i) { it->second[i] = std::pow(static_cast<long double>(b_), -static_cast<ptrdiff_t>(i)); } } std::vector<uint32_t> counts(q_ + 2); if(ptr) { for(size_t i = 0; i < m_; ++i) { ++counts[std::max(data_[i], ptr->data()[i])]; } } else { for(size_t i = 0; i < m_; ++i) { ++counts[data_[i]]; } } long double ret = 0.; for(ptrdiff_t i = lowkh_.klow(); i <= q_ + 1; ++i) { ret += counts[i] * it->second[i]; } return ret; } double jaccard_by_ix(const SetSketch<ResT, FT> &o) const { auto us = union_size(o); auto mycard = getcard(), ocard = o.getcard(); return (mycard + ocard - us) / us; } double union_size(const SetSketch<ResT, FT> &o) const { double num = m_ * (1. - 1. / b_) * logbinv_ * ainv_; return num / harmean(&o); } double cardinality_estimate() const {return cardinality();} double cardinality() const { double num = m_ * (1. - 1. 
/ b_) * logbinv_ * ainv_; return num / harmean(); } void merge(const SetSketch<ResT, FT> &o) { if(!same_params(o)) throw std::runtime_error("Can't merge sets with differing parameters"); std::transform(data(), data() + m_, o.data(), data(), [](auto x, auto y) {return std::max(x, y);}); mycard_ = -1.; } SetSketch &operator+=(const SetSketch<ResT, FT> &o) {merge(o); return *this;} SetSketch operator+(const SetSketch<ResT, FT> &o) const { SetSketch ret(*this); ret += o; return ret; } size_t shared_registers(const SetSketch<ResT, FT> &o) const { return eq::count_eq(data(), o.data(), m_); } std::pair<double, double> alpha_beta(const SetSketch<ResT, FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); double alpha = g_b(b_, double(gtlt.first) / m_); double beta = g_b(b_, double(gtlt.second) / m_); return {alpha, beta}; } static constexpr double __union_card(double alph, double beta, double lhcard, double rhcard) { return std::max((lhcard + rhcard) / (2. - alph - beta), 0.); } double getcard() const { if(mycard_ < 0.) mycard_ = cardinality(); return mycard_; } double jaccard_index(const SetSketch<ResT, FT> &o) const { if(!same_params(o)) throw std::invalid_argument("Parameters must match for comparison"); auto gtlt = eq::count_gtlt(data(), o.data(), m_); return jmle_simple<double>(gtlt.first, gtlt.second, m_, getcard(), o.getcard(), b_); } std::tuple<double, double, double> jointmle(const SetSketch<ResT, FT> &o) const { auto ji = jaccard_index(o); const auto y = 1. / (1. 
+ ji); double mycard = getcard(), ocard = o.getcard(); return {std::max(0., mycard - ocard * ji) * y, std::max(0., ocard - mycard * ji) * y, (mycard + ocard) * ji * y}; }; double jaccard_index_by_card(const SetSketch<ResT, FT> &o) const { auto tup = jointmle(o); return std::get<2>(tup) / (std::get<0>(tup) + std::get<1>(tup) + std::get<2>(tup)); } std::tuple<double, double, double> alpha_beta_mu(const SetSketch<ResT, FT> &o) const { auto gtlt = eq::count_gtlt(data(), o.data(), m_); double alpha = g_b(b_, double(gtlt.first) / m_); double beta = g_b(b_, double(gtlt.second) / m_); double mycard = getcard(), ocard = o.getcard(); if(alpha + beta >= 1.) // They seem to be disjoint sets, use SetSketch (15) return {(mycard) / (mycard + ocard), ocard / (mycard + ocard), mycard + ocard}; return {alpha, beta, __union_card(alpha, beta, mycard, ocard)}; } void write(std::string s) const { gzFile fp = gzopen(s.data(), "w"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s + "for writing"); write(fp); gzclose(fp); } void read(std::string s) { gzFile fp = gzopen(s.data(), "r"); if(!fp) throw ZlibError(std::string("Failed to open file ") + s); read(fp); gzclose(fp); } void read(gzFile fp) { gzread(fp, &m_, sizeof(m_)); gzread(fp, &a_, sizeof(a_)); gzread(fp, &b_, sizeof(b_)); gzread(fp, &q_, sizeof(q_)); ainv_ = 1.L / a_; logbinv_ = 1.L / std::log1p(b_ - 1.); data_.reset(allocate(m_)); lowkh_.assign(data_.get(), m_, b_); gzread(fp, (void *)data_.get(), m_ * sizeof(ResT)); std::fill(&data_[m_], &data_[2 * m_ - 1], ResT(0)); for(size_t i = 0;i < m_; ++i) lowkh_.update(i, data_[i]); ls_.resize(m_); } int checkwrite(std::FILE *fp, const void *ptr, size_t nb) const { auto ret = ::write(::fileno(fp), ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return ret; } int checkwrite(gzFile fp, const void *ptr, size_t nb) const { auto ret = gzwrite(fp, ptr, nb); if(size_t(ret) != nb) throw ZlibError("Failed to write setsketch to file"); return 
ret; } void write(std::FILE *fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)&a_, sizeof(a_)); checkwrite(fp, (const void *)&b_, sizeof(b_)); checkwrite(fp, (const void *)&q_, sizeof(q_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(ResT)); } void write(gzFile fp) const { checkwrite(fp, (const void *)&m_, sizeof(m_)); checkwrite(fp, (const void *)&a_, sizeof(a_)); checkwrite(fp, (const void *)&b_, sizeof(b_)); checkwrite(fp, (const void *)&q_, sizeof(q_)); checkwrite(fp, (const void *)data_.get(), m_ * sizeof(ResT)); } void clear() { std::fill(data_.get(), &data_[m_ * 2 - 1], ResT(0)); mycard_ = -1.; } const std::vector<uint64_t> &ids() const {return ids_;} }; #ifndef M_E #define EULER_E 2.718281828459045 #else #define EULER_E M_E #endif struct NibbleSetS: public SetSketch<uint8_t> { NibbleSetS(size_t nreg, double b=EULER_E, double a=5e-4): SetSketch<uint8_t>(nreg, b, a, QV) {} static constexpr size_t QV = 14u; template<typename Arg> NibbleSetS(const Arg &arg): SetSketch<uint8_t>(arg) {} }; struct SmallNibbleSetS: public SetSketch<uint8_t> { SmallNibbleSetS(size_t nreg, double b=4., double a=1e-6): SetSketch<uint8_t>(nreg, b, a, QV) {} static constexpr size_t QV = 14u; template<typename Arg> SmallNibbleSetS(const Arg &arg): SetSketch<uint8_t>(arg) {} }; struct ByteSetS: public SetSketch<uint8_t, long double> { using Super = SetSketch<uint8_t, long double>; static constexpr size_t QV = 254u; ByteSetS(size_t nreg, long double b=1.2, long double a=20.): Super(nreg, b, a, QV) {} template<typename Arg> ByteSetS(const Arg &arg): Super(arg) {} }; struct ShortSetS: public SetSketch<uint16_t, long double> { static constexpr long double DEFAULT_B = 1.0005; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; ShortSetS(size_t nreg, long double b=DEFAULT_B, long double a=DEFAULT_A): SetSketch<uint16_t, long double>(nreg, b, a, QV) {} template<typename Arg> ShortSetS(const Arg &arg): 
SetSketch<uint16_t, long double>(arg) {} }; struct WideShortSetS: public SetSketch<uint16_t, long double> { static constexpr long double DEFAULT_B = 1.0004; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; WideShortSetS(size_t nreg, long double b=DEFAULT_B, long double a=DEFAULT_A): SetSketch<uint16_t, long double>(nreg, b, a, QV) {} template<typename...Args> WideShortSetS(Args &&...args): SetSketch<uint16_t, long double>(std::forward<Args>(args)...) {} }; struct EShortSetS: public SetSketch<uint16_t, long double> { using Super = SetSketch<uint16_t, long double>; static constexpr long double DEFAULT_B = 1.0006; static constexpr long double DEFAULT_A = .06; static constexpr size_t QV = 65534u; template<typename IT, typename OFT, typename=typename std::enable_if<std::is_integral<IT>::value && std::is_floating_point<OFT>::value>::type> EShortSetS(IT nreg, OFT b=DEFAULT_B, OFT a=DEFAULT_A): Super(nreg, b, a, QV) {} EShortSetS(size_t nreg): Super(nreg, DEFAULT_B, DEFAULT_A, QV) {} EShortSetS(int nreg): Super(nreg, DEFAULT_B, DEFAULT_A, QV) {} template<typename...Args> EShortSetS(Args &&...args): Super(std::forward<Args>(args)...) {} }; struct EByteSetS: public SetSketch<uint8_t, double> { static constexpr double DEFAULT_B = 1.09; static constexpr double DEFAULT_A = .08; static constexpr size_t QV = 254u; template<typename IT, typename=typename std::enable_if<std::is_integral<IT>::value>::type> EByteSetS(IT nreg, double b=DEFAULT_B, double a=DEFAULT_A): SetSketch<uint8_t, double>(nreg, b, a, QV) {} template<typename...Args> EByteSetS(Args &&...args): SetSketch<uint8_t, double>(std::forward<Args>(args)...) {} }; } // namespace setsketch using setsketch::EByteSetS; using setsketch::ByteSetS; using setsketch::ShortSetS; using setsketch::EShortSetS; using setsketch::WideShortSetS; using setsketch::NibbleSetS; using setsketch::SmallNibbleSetS; using setsketch::CSetSketch; using setsketch::SetSketch; } // namespace sketch #endif
error2.c
/******************************************************************************
 * FILE: omp_bug5.c (fixed)
 * DESCRIPTION:
 *   Using SECTIONS, two threads initialize their own array and then add it
 *   to the other's array. Fixes applied to the original demo:
 *     1. 'i' was shared between threads (it was missing from the private
 *        clause), so both section loops raced on one counter — undefined
 *        iteration behavior. It is now private.
 *     2. a[] and b[] (2 x ~4 MB of float) lived on the stack, which can
 *        overflow a default 8 MB stack; b[] was also read by section 1
 *        before section 2 had initialized it. They are now 'static', which
 *        moves them to static storage and zero-initializes them.
 *     3. The locks are now destroyed before exit.
 *   NOTE(review): the locks give mutual exclusion, not ordering — section 1
 *   may still add a[] into b[] before section 2 writes b[], so the final
 *   array contents are nondeterministic. That is inherent to this demo's
 *   structure (a barrier is not allowed inside a sections construct).
 * AUTHOR: Blaise Barney 01/29/04
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 1000000
#define PI 3.1415926535
#define DELTA .01415926535

int main(int argc, char *argv[]) {
  int nthreads, tid, i;
  /* 'static': keeps ~8 MB of float data off the stack and zero-initializes
   * both arrays, so the cross-section accumulation never reads garbage. */
  static float a[N], b[N];
  omp_lock_t locka, lockb;

  /* Initialize the locks */
  omp_init_lock(&locka);
  omp_init_lock(&lockb);

  /* Fork a team of threads giving them their own copies of variables.
   * FIX: 'i' must be private — a shared loop counter is a data race. */
#pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
  {
    /* Obtain thread number and number of threads */
    tid = omp_get_thread_num();
#pragma omp master
    {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
    printf("Thread %d starting...\n", tid);
#pragma omp barrier

#pragma omp sections nowait
    {
#pragma omp section
      {
        /* Initialize a[] under locka, then accumulate into b[] under lockb.
         * Only one lock is ever held at a time, so no deadlock is possible. */
        printf("Thread %d initializing a[]\n", tid);
        omp_set_lock(&locka);
        for (i = 0; i < N; i++)
          a[i] = i * DELTA;
        omp_unset_lock(&locka);
#pragma omp flush(b, a)
        omp_set_lock(&lockb);
        printf("Thread %d adding a[] to b[]\n", tid);
        for (i = 0; i < N; i++)
          b[i] += a[i];
        omp_unset_lock(&lockb);
      }

#pragma omp section
      {
        /* Mirror image of the section above: init b[] then add into a[]. */
        printf("Thread %d initializing b[]\n", tid);
        omp_set_lock(&lockb);
        for (i = 0; i < N; i++)
          b[i] = i * PI;
        omp_unset_lock(&lockb);
#pragma omp flush(b, a)
        omp_set_lock(&locka);
        printf("Thread %d adding b[] to a[]\n", tid);
        for (i = 0; i < N; i++)
          a[i] += b[i];
        omp_unset_lock(&locka);
      }
    } /* end of sections */
  }   /* end of parallel region */

  /* FIX: release lock resources acquired by omp_init_lock. */
  omp_destroy_lock(&locka);
  omp_destroy_lock(&lockb);
  return 0;
}
ngdsac_derivative.h
/* Based on the DSAC++ and ESAC code. https://github.com/vislearn/LessMore https://github.com/vislearn/esac Copyright (c) 2016, TU Dresden Copyright (c) 2010, Heidelberg University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the TU Dresden, Heidelberg University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #define PROB_THRESH 0.001 // ignore hypotheses with low probability for expectations namespace ngdsac { /** * @brief Calculates the Jacobean of the projection function w.r.t the given 3D point, ie. the function has the form 3 -> 1 * @param pt Ground truth 2D location. * @param obj 3D point. 
* @param rot Rotation in axis-angle format (OpenCV convention).
* @param trans Translation vector (OpenCV convention).
* @param camMat Calibration matrix of the camera (entries are read as float / CV_32F).
* @param maxReproErr Reprojection errors are clamped to this maximum value.
* @return 1x3 Jacobean matrix of partial derivatives.
*/
cv::Mat_<double> dProjectdObj(
	const cv::Point2f& pt,
	const cv::Point3f& obj,
	const cv::Mat& rot,
	const cv::Mat& trans,
	const cv::Mat& camMat,
	float maxReproErr)
{
	double f = camMat.at<float>(0, 0);   // focal length (assumes fx == fy)
	double ppx = camMat.at<float>(0, 2); // principal point x
	double ppy = camMat.at<float>(1, 2); // principal point y

	//transform point to camera coordinates
	cv::Mat objMat = cv::Mat(obj);
	objMat.convertTo(objMat, CV_64F);
	objMat = rot * objMat + trans;

	if(std::abs(objMat.at<double>(2, 0)) < EPS) // prevent division by zero
		return cv::Mat_<double>::zeros(1, 3);

	// project to the image plane
	double px = f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) + ppx;
	double py = f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) + ppy;

	// calculate error (Euclidean reprojection distance)
	double err = std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py));

	// early out if projection error is above threshold (clamped region has zero gradient)
	if(err > maxReproErr)
		return cv::Mat_<double>::zeros(1, 3);

	err += EPS; // avoid dividing by zero

	// derivative in x direction of obj coordinate (quotient rule on the projection)
	double pxdx = f * rot.at<double>(0, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
	double pydx = f * rot.at<double>(1, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0);
	double dx = 0.5 / err * (2 * (pt.x - px) * -pxdx + 2 * (pt.y - py) * -pydx);

	// derivative in y direction of obj coordinate
	double pxdy = f * rot.at<double>(0, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
	double pydy = f * rot.at<double>(1, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1);
	double dy = 0.5 / err * (2 * (pt.x - px) * -pxdy + 2 * (pt.y - py) * -pydy);

	// derivative in z direction of obj coordinate
	double pxdz = f * rot.at<double>(0, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
	double pydz = f * rot.at<double>(1, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2);
	double dz = 0.5 / err * (2 * (pt.x - px) * -pxdz + 2 * (pt.y - py) * -pydz);

	cv::Mat_<double> jacobean(1, 3);
	jacobean(0, 0) = dx;
	jacobean(0, 1) = dy;
	jacobean(0, 2) = dz;

	return jacobean;
}

/**
* @brief Checks whether the given matrix contains NaN entries.
* @param m Input matrix.
* @return True if m contains NaN entries.
*/
inline bool containsNaNs(const cv::Mat& m)
{
	// NaN != NaN, so (m != m) marks exactly the NaN entries
	return cv::sum(cv::Mat(m != m))[0] > 0;
}

/**
* @brief Calculates the Jacobean of the PNP function w.r.t. the object coordinate inputs.
*
* PNP is treated as a n x 3 -> 6 function, i.e. it takes n 3D coordinates and maps them to a 6D pose.
* The Jacobean is therefore 6x3n.
* The Jacobean is calculated using central differences, and hence only suitable for small point sets.
* For gradients of large point sets, we use an analytical approximation, see the backward function in ngdsac.cpp.
*
* @param imgPts List of 2D points.
* @param objPts List of corresponding 3D points (taken by value: perturbed in place for the finite differences).
* @param camMat Camera calibration matrix.
* @param eps Step size for central differences.
* @return 6x3n Jacobean matrix of partial derivatives; all zeros if PnP fails or produces NaNs.
*/
cv::Mat_<double> dPNP(
	const std::vector<cv::Point2f>& imgPts,
	std::vector<cv::Point3f> objPts,
	const cv::Mat& camMat,
	float eps = 0.001f)
{
	int pnpMethod = (imgPts.size() == 4) ? cv::SOLVEPNP_P3P : cv::SOLVEPNP_ITERATIVE;

	//in case of P3P the 4th point is needed to resolve ambiguities, its derivative is zero
	int effectiveObjPoints = (pnpMethod == cv::SOLVEPNP_P3P) ? 3 : objPts.size();

	cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(6, objPts.size() * 3);
	bool success;

	// central differences: perturb each coordinate of each point by +/- eps
	for(int i = 0; i < effectiveObjPoints; i++)
	for(unsigned j = 0; j < 3; j++)
	{
		if(j == 0) objPts[i].x += eps;
		else if(j == 1) objPts[i].y += eps;
		else if(j == 2) objPts[i].z += eps;

		// forward step
		ngdsac::pose_t fStep;
		success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), fStep.first, fStep.second, false, pnpMethod);

		if(!success)
			return cv::Mat_<double>::zeros(6, objPts.size() * 3);

		if(j == 0) objPts[i].x -= 2 * eps;
		else if(j == 1) objPts[i].y -= 2 * eps;
		else if(j == 2) objPts[i].z -= 2 * eps;

		// backward step
		ngdsac::pose_t bStep;
		success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), bStep.first, bStep.second, false, pnpMethod);

		if(!success)
			return cv::Mat_<double>::zeros(6, objPts.size() * 3);

		// restore the original coordinate value
		if(j == 0) objPts[i].x += eps;
		else if(j == 1) objPts[i].y += eps;
		else if(j == 2) objPts[i].z += eps;

		// gradient calculation: (f(x+eps) - f(x-eps)) / (2*eps)
		fStep.first = (fStep.first - bStep.first) / (2 * eps);
		fStep.second = (fStep.second - bStep.second) / (2 * eps);

		fStep.first.copyTo(jacobean.col(i * 3 + j).rowRange(0, 3));
		fStep.second.copyTo(jacobean.col(i * 3 + j).rowRange(3, 6));

		if(containsNaNs(jacobean.col(i * 3 + j)))
			return cv::Mat_<double>::zeros(6, objPts.size() * 3);
	}

	return jacobean;
}

/**
* @brief Calculates the Jacobean matrix of the function that maps n estimated scene coordinates to a score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis.
* @param sceneCoordinates Scene coordinate prediction (1x3xHxW).
* @param sampling Contains original image coordinate for each scene coordinate predicted.
* @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices.
* @param jacobeansScore (output parameter) List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
* @param scoreOutputGradients Gradients w.r.t the score i.e. the gradients of the loss up to the soft inlier count.
* @param hyps List of RANSAC hypotheses.
* @param reproErrs Image of reprojection error for each pose hypothesis.
* @param jacobeansHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
* @param hypProbs Selection probabilities over all hypotheses.
* @param camMat Camera calibration matrix.
* @param inlierAlpha Alpha parameter for soft inlier counting.
* @param inlierBeta Beta parameter for soft inlier counting.
* @param inlierThreshold RANSAC inlier threshold.
* @param maxReproErr Reprojection errors are clamped to this maximum value.
*/
void dScore(
	ngdsac::coord_t& sceneCoordinates,
	const cv::Mat_<cv::Point2i>& sampling,
	const std::vector<std::vector<cv::Point2i>>& sampledPoints,
	std::vector<cv::Mat_<double>>& jacobeansScore,
	const std::vector<double>& scoreOutputGradients,
	const std::vector<ngdsac::pose_t>& hyps,
	const std::vector<cv::Mat_<float>>& reproErrs,
	const std::vector<cv::Mat_<double>>& jacobeansHyps,
	const std::vector<double>& hypProbs,
	const cv::Mat& camMat,
	float inlierAlpha,
	float inlierBeta,
	float inlierThreshold,
	float maxReproErr)
{
	int hypCount = sampledPoints.size();

	// collect 2d-3D correspondences
	std::vector<std::vector<cv::Point2f>> imgPts(hypCount);
	std::vector<std::vector<cv::Point3f>> objPts(hypCount);

	#pragma omp parallel for
	for(int h = 0; h < hypCount; h++)
	{
		if(hypProbs[h] < PROB_THRESH) continue; // ignore negligible hypotheses

		int batchIdx = 0; // only batch size = 1 supported atm

		for(unsigned i = 0; i < sampledPoints[h].size(); i++)
		{
			int x = sampledPoints[h][i].x;
			int y = sampledPoints[h][i].y;

			imgPts[h].push_back(sampling(y, x));
			objPts[h].push_back(cv::Point3f(
				sceneCoordinates[batchIdx][0][y][x],
				sceneCoordinates[batchIdx][1][y][x],
				sceneCoordinates[batchIdx][2][y][x]));
		}
	}

	// derivatives of the soft inlier scores (sigmoid of the thresholded reprojection error)
	std::vector<cv::Mat_<double>> dReproErrs(reproErrs.size());

	#pragma omp parallel for
	for(int h = 0; h < hypCount; h++)
	{
		if(hypProbs[h] < PROB_THRESH) continue;

		dReproErrs[h] = cv::Mat_<double>::zeros(reproErrs[h].size());

		for(int x = 0; x < sampling.cols; x++)
		for(int y = 0; y < sampling.rows; y++)
		{
			double softThreshold = inlierBeta * (reproErrs[h](y, x) - inlierThreshold);
			softThreshold = 1 / (1+std::exp(-softThreshold));
			dReproErrs[h](y, x) = -softThreshold * (1 - softThreshold) * inlierBeta * scoreOutputGradients[h];
		}

		// normalize by the number of pixels, scaled by alpha
		dReproErrs[h] *= inlierAlpha / dReproErrs[h].cols / dReproErrs[h].rows;
	}

	jacobeansScore.resize(hypCount);

	// derivative of the loss wrt the score
	#pragma omp parallel for
	for(int h = 0; h < hypCount; h++)
	{
		cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(1, sampling.cols * sampling.rows * 3);
		jacobeansScore[h] = jacobean;

		if(hypProbs[h] < PROB_THRESH) continue;

		int batchIdx = 0; // only batch size = 1 supported atm

		// accumulate derivative of score wrt the object coordinates that are used to calculate the pose
		cv::Mat_<double> supportPointGradients = cv::Mat_<double>::zeros(1, 12);
		cv::Mat_<double> dHdO = dPNP(imgPts[h], objPts[h], camMat); // 6x12

		if(ngdsac::getMax(dHdO) > 10) dHdO = 0; // clamping for stability (zeroes the whole matrix)

		cv::Mat rot;
		cv::Rodrigues(hyps[h].first, rot);

		for(int x = 0; x < sampling.cols; x++)
		for(int y = 0; y < sampling.rows; y++)
		{
			// column-major pixel index (x * rows + y), matching the layout used below
			int ptIdx = x * dReproErrs[h].rows + y;

			cv::Point2f pt(sampling(y, x).x, sampling(y, x).y);
			cv::Point3f obj = cv::Point3f(
				sceneCoordinates[batchIdx][0][y][x],
				sceneCoordinates[batchIdx][1][y][x],
				sceneCoordinates[batchIdx][2][y][x]);

			// account for the direct influence of all object coordinates in the score
			cv::Mat_<double> dPdO = dProjectdObj(pt, obj, rot, hyps[h].second, camMat, maxReproErr);
			dPdO *= dReproErrs[h](y, x);
			dPdO.copyTo(jacobean.colRange(x * dReproErrs[h].rows * 3 + y * 3, x * dReproErrs[h].rows * 3 + y * 3 + 3));

			// account for the indirect influence of the object coordinates that are used to calculate the pose
			cv::Mat_<double> dPdH = jacobeansHyps[h].row(ptIdx);
			supportPointGradients += dReproErrs[h](y, x) * dPdH * dHdO;
		}

		// add the accumulated derivatives for the object coordinates that are used to calculate the pose
		for(unsigned i = 0; i < sampledPoints[h].size(); i++)
		{
			unsigned x = sampledPoints[h][i].x;
			unsigned y = sampledPoints[h][i].y;

			jacobean.colRange(x * dReproErrs[h].rows * 3 + y * 3, x * dReproErrs[h].rows * 3 + y * 3 + 3) += supportPointGradients.colRange(i * 3, i * 3 + 3);
		}
	}
}

/**
* @brief Calculates the Jacobean matrix of the function that maps n estimated object coordinates to a soft max score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis.
*
* This is the soft maxed version of dScore (see above).
*
* @param sceneCoordinates Scene coordinate prediction (1x3xHxW).
* @param sampling Contains original image coordinate for each scene coordinate predicted.
* @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices.
* @param losses Loss value for each hypothesis.
* @param hypProbs Selection probabilities over all hypotheses.
* @param initHyps List of unrefined hypotheses.
* @param initReproErrs List of reprojection error images of unrefined hypotheses.
* @param jacobeansHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
* @param camMat Camera calibration matrix.
* @param inlierAlpha Alpha parameter for soft inlier counting.
* @param inlierBeta Beta parameter for soft inlier counting.
* @param inlierThreshold RANSAC inlier threshold.
* @param maxReproErr Reprojection errors are clamped to this maximum value.
* @return List of Jacobean matrices. One (HxW) x 3 matrix per pose hypothesis (reordered from 1 x 3n).
*/
std::vector<cv::Mat_<double>> dSMScore(
	ngdsac::coord_t& sceneCoordinates,
	const cv::Mat_<cv::Point2i>& sampling,
	const std::vector<std::vector<cv::Point2i>>& sampledPoints,
	const std::vector<double>& losses,
	const std::vector<double>& hypProbs,
	const std::vector<ngdsac::pose_t>& initHyps,
	const std::vector<cv::Mat_<float>>& initReproErrs,
	const std::vector<cv::Mat_<double>>& jacobeansHyps,
	const cv::Mat& camMat,
	float inlierAlpha,
	float inlierBeta,
	float inlierThreshold,
	float maxReproErr)
{
	// assemble the gradients wrt the scores, ie the gradients of soft max function
	std::vector<double> scoreOutputGradients(sampledPoints.size());

	#pragma omp parallel for
	for(unsigned i = 0; i < sampledPoints.size(); i++)
	{
		if(hypProbs[i] < PROB_THRESH) continue;

		// standard softmax derivative: p_i * (loss_i - sum_j p_j * loss_j)
		scoreOutputGradients[i] = hypProbs[i] * losses[i];
		for(unsigned j = 0; j < sampledPoints.size(); j++)
			scoreOutputGradients[i] -= hypProbs[i] * hypProbs[j] * losses[j];
	}

	// calculate gradients of the score function
	std::vector<cv::Mat_<double>> jacobeansScore;
	dScore(sceneCoordinates, sampling, sampledPoints, jacobeansScore, scoreOutputGradients, initHyps, initReproErrs, jacobeansHyps, hypProbs, camMat, inlierAlpha, inlierBeta, inlierThreshold, maxReproErr);

	// data conversion
	#pragma omp parallel for
	for(unsigned i = 0; i < jacobeansScore.size(); i++)
	{
		// reorder to points row first into rows
		cv::Mat_<double> reformat = cv::Mat_<double>::zeros(sampling.cols * sampling.rows, 3);

		if(hypProbs[i] >= PROB_THRESH)
		{
			for(int x = 0; x < sampling.cols; x++)
			for(int y = 0; y < sampling.rows; y++)
			{
				cv::Mat_<double> patchGrad = jacobeansScore[i].colRange(
					x * sampling.rows * 3 + y * 3,
					x * sampling.rows * 3 + y * 3 + 3);

				patchGrad.copyTo(reformat.row(y * sampling.cols + x));
			}
		}

		jacobeansScore[i] = reformat;
	}

	return jacobeansScore;
}
}
CPhotoconsistencyOdometryAnalytic.h
/* * Photoconsistency-Visual-Odometry * Multiscale Photoconsistency Visual Odometry from RGBD Images * Copyright (c) 2012-2013, Miguel Algaba Borrego * * http://code.google.com/p/photoconsistency-visual-odometry/ * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the holder(s) nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*
*/

#ifndef _CPHOTOCONSISTENCY_ODOMETRY_ANALYTIC_
#define _CPHOTOCONSISTENCY_ODOMETRY_ANALYTIC_

#define ENABLE_GAUSSIAN_BLUR 1
#define ENABLE_BOX_FILTER_BLUR 0
#define ENABLE_OPENMP_MULTITHREADING_ANALYTIC 0 // Enables OpenMP for CPhotoconsistencyOdometryAnalytic
#define ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS 0

#include "CPhotoconsistencyOdometry.h"

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp" //TickMeter

#include <iostream>

namespace phovo
{

namespace Analytic
{

/*!This class computes the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach.
To estimate the rigid transformation, this class implements a coarse to fine approach. Thus, the algorithm starts finding a first pose approximation at
a low resolution level and uses the estimate to initialize the optimization at greater image scales. Both the residuals and jacobians are computed analytically.*/
template< class TPixel, class TCoordinate >
class CPhotoconsistencyOdometryAnalytic :
    public CPhotoconsistencyOdometry< TPixel, TCoordinate >
{
public:
  typedef CPhotoconsistencyOdometry< TPixel, TCoordinate > Superclass;

  typedef typename Superclass::CoordinateType      CoordinateType;
  typedef typename Superclass::IntensityImageType  IntensityImageType;
  typedef typename Superclass::DepthImageType      DepthImageType;
  typedef typename Superclass::Matrix33Type        Matrix33Type;
  typedef typename Superclass::Matrix44Type        Matrix44Type;
  typedef typename Superclass::Vector6Type         Vector6Type;
  typedef typename Superclass::Vector4Type         Vector4Type;

private:
  typedef DepthImageType InternalIntensityImageType;
  typedef std::vector< InternalIntensityImageType > InternalIntensityImageContainerType;
  typedef std::vector< DepthImageType > DepthImageContainerType;
  typedef std::vector< CoordinateType > CoordinateContainerType;
  typedef std::vector< int > IntegerContainerType;

  /*!Intensity (gray), depth and gradient image pyramids. Each pyramid has 'numOptimizationLevels' levels.*/
  InternalIntensityImageContainerType m_IntensityPyramid0;
  InternalIntensityImageContainerType m_IntensityPyramid1;
  DepthImageContainerType m_DepthPyramid0;
  DepthImageContainerType m_DepthPyramid1;
  InternalIntensityImageContainerType m_IntensityGradientXPyramid1;
  InternalIntensityImageContainerType m_IntensityGradientYPyramid1;
  /*!Camera matrix (intrinsic parameters).*/
  Matrix33Type m_IntrinsicMatrix;
  /*!Current optimization level. Level 0 corresponds to the higher image resolution.*/
  int m_OptimizationLevel;
  /*!Number of optimization levels.*/
  int m_NumOptimizationLevels;
  /*!Scaling factor to update the state vector (at each level).*/
  CoordinateContainerType m_LambdaOptimizationSteps;
  /*!Size (in pixels) of the blur filter (at each level).*/
  IntegerContainerType m_BlurFilterSizes;
  /*!Scaling factor applied to the image gradients (at each level).*/
  CoordinateContainerType m_ImageGradientsScalingFactors;
  /*!Maximum number of iterations for the Gauss-Newton algorithm (at each level).*/
  IntegerContainerType m_MaxNumIterations;
  /*!Minimum gradient norm of the jacobian (at each level).*/
  CoordinateContainerType m_MinGradientNorms;
  /*!Enable the visualization of the optimization process (only for debug).*/
  bool m_VisualizeIterations;
  /*!State vector.*/
  Vector6Type m_StateVector; //Parameter vector (x y z yaw pitch roll)
  /*!Gradient of the error function.*/
  Vector6Type m_Gradients;
  /*!Current iteration at the current optimization level.*/
  int m_Iteration;
  /*!Minimum allowed depth to consider a depth pixel valid.*/
  CoordinateType m_MinDepth;
  /*!Maximum allowed depth to consider a depth pixel valid.*/
  CoordinateType m_MaxDepth;

  /*!Builds an image pyramid with 'levels' levels by successive halving of 'img';
  optionally blurs each level with the per-level filter size (m_BlurFilterSizes).*/
  template< class TImage >
  void BuildPyramid( const TImage & img,
                     std::vector< TImage > & pyramid,
                     const int levels, const bool applyBlur )
  {
    typedef TImage ImageType;

    //Create space for all the images
    pyramid.resize( levels );

    double factor = 1.;
    for( int level=0; level<levels; level++ )
    {
      //Create an auxiliar image of factor times the size of the original image
      ImageType imgAux;
      if( level!=0 )
      {
        cv::resize( img, imgAux, cv::Size(0,0), factor, factor );
      }
      else
      {
        imgAux = img;
      }

      //Blur the resized image with different filter size depending on the current pyramid level
      if( applyBlur )
      {
        int blurFilterSize = m_BlurFilterSizes[level];
        #if ENABLE_GAUSSIAN_BLUR
        if( blurFilterSize>0 )
        {
          // applied twice on purpose for a stronger smoothing effect
          cv::GaussianBlur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ), 3 );
          cv::GaussianBlur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ), 3 );
        }
        #elif ENABLE_BOX_FILTER_BLUR
        if( blurFilterSize>0 )
        {
          cv::blur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ) );
          cv::blur( imgAux, imgAux, cv::Size( blurFilterSize, blurFilterSize ) );
        }
        #endif
      }

      //Assign the resized image to the current level of the pyramid
      pyramid[level] = imgAux;

      factor = factor/2;
    }
  }

  /*!Computes Scharr x/y gradient pyramids for every level of 'imagePyramid',
  scaled by the per-level m_ImageGradientsScalingFactors.*/
  void BuildDerivativesPyramids( InternalIntensityImageContainerType & imagePyramid,
                                 InternalIntensityImageContainerType & derXPyramid,
                                 InternalIntensityImageContainerType & derYPyramid)
  {
    //Compute image gradients
    double delta = 0.0;
    int ddepth = m_IntensityPyramid0[0].type();

    //Create space for all the derivatives images
    derXPyramid.resize(imagePyramid.size());
    derYPyramid.resize(imagePyramid.size());

    for( size_t level=0; level<imagePyramid.size(); level++ )
    {
      // Compute the gradient in x
      // NOTE(review): imgGray1_grad_x/imgGray1_grad_y below are declared but never
      // used — the Scharr results go directly into the pyramid entries.
      InternalIntensityImageType imgGray1_grad_x;
      cv::Scharr( imagePyramid[level], derXPyramid[level], ddepth, 1, 0,
                  m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT );

      // Compute the gradient in y
      InternalIntensityImageType imgGray1_grad_y;
      cv::Scharr( imagePyramid[level], derYPyramid[level],ddepth, 0, 1,
                  m_ImageGradientsScalingFactors[level], delta, cv::BORDER_DEFAULT );
    }
  }

  /*!Warps every valid source pixel into the target frame with the current state
  vector, and fills the per-pixel photometric residuals and the analytic 1x6
  jacobians (image gradient chained with the projection/rigid-motion jacobian).*/
  void ComputeResidualsAndJacobians( const InternalIntensityImageType & source_grayImg,
                                     const DepthImageType & source_depthImg,
                                     const InternalIntensityImageType & target_grayImg,
                                     const InternalIntensityImageType & target_gradXImg,
                                     const InternalIntensityImageType & target_gradYImg,
                                     Numeric::RowDynamicMatrixColMajor< CoordinateType, 1 > & residuals,
                                     Numeric::RowDynamicMatrixColMajor< CoordinateType, 6 > & jacobians,
                                     InternalIntensityImageType & warped_source_grayImage) const
  {
    int nRows = source_grayImg.rows;
    int nCols = source_grayImg.cols;

    // intrinsics are scaled to the current pyramid level resolution
    CoordinateType scaleFactor = 1.0/pow(2,m_OptimizationLevel);
    CoordinateType fx = m_IntrinsicMatrix(0,0)*scaleFactor;
    CoordinateType fy = m_IntrinsicMatrix(1,1)*scaleFactor;
    CoordinateType ox = m_IntrinsicMatrix(0,2)*scaleFactor;
    CoordinateType oy = m_IntrinsicMatrix(1,2)*scaleFactor;
    CoordinateType inv_fx = 1.f/fx;
    CoordinateType inv_fy = 1.f/fy;

    CoordinateType x = m_StateVector(0);
    CoordinateType y = m_StateVector(1);
    CoordinateType z = m_StateVector(2);
    CoordinateType yaw = m_StateVector(3);
    CoordinateType pitch = m_StateVector(4);
    CoordinateType roll = m_StateVector(5);

    //Compute the rigid transformation matrix from the parameters (ZYX Euler convention)
    Matrix44Type Rt = Matrix44Type::Identity();
    CoordinateType sin_yaw = sin(yaw);
    CoordinateType cos_yaw = cos(yaw);
    CoordinateType sin_pitch = sin(pitch);
    CoordinateType cos_pitch = cos(pitch);
    CoordinateType sin_roll = sin(roll);
    CoordinateType cos_roll = cos(roll);
    Rt(0,0) = cos_yaw * cos_pitch;
    Rt(0,1) = cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll;
    Rt(0,2) = cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll;
    Rt(0,3) = x;
    Rt(1,0) = sin_yaw * cos_pitch;
    Rt(1,1) = sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll;
    Rt(1,2) = sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll;
    Rt(1,3) = y;
    Rt(2,0) = -sin_pitch;
    Rt(2,1) = cos_pitch * sin_roll;
    Rt(2,2) = cos_pitch * cos_roll;
    Rt(2,3) = z;
    Rt(3,0) = 0.0;
    Rt(3,1) = 0.0;
    Rt(3,2) = 0.0;
    Rt(3,3) = 1.0;

    // pre-computed trigonometric products reused in the per-pixel jacobian below
    CoordinateType temp1 = cos(pitch)*sin(roll);
    CoordinateType temp2 = cos(pitch)*cos(roll);
    CoordinateType temp3 = sin(pitch);
    CoordinateType temp4 = (sin(roll)*sin(yaw)+sin(pitch)*cos(roll)*cos(yaw));
    CoordinateType temp5 = (sin(pitch)*sin(roll)*cos(yaw)-cos(roll)*sin(yaw));
    CoordinateType temp6 = (sin(pitch)*sin(roll)*sin(yaw)+cos(roll)*cos(yaw));
    CoordinateType temp7 = (-sin(pitch)*sin(roll)*sin(yaw)-cos(roll)*cos(yaw));
    CoordinateType temp8 = (sin(roll)*cos(yaw)-sin(pitch)*cos(roll)*sin(yaw));
    CoordinateType temp9 = (sin(pitch)*cos(roll)*sin(yaw)-sin(roll)*cos(yaw));
    CoordinateType temp10 = cos(pitch)*sin(roll)*cos(yaw);
    CoordinateType temp11 = cos(pitch)*cos(yaw)+x;
    CoordinateType temp12 = cos(pitch)*cos(roll)*cos(yaw);
    CoordinateType temp13 = sin(pitch)*cos(yaw);
    CoordinateType temp14 = cos(pitch)*sin(yaw);
    CoordinateType temp15 = cos(pitch)*cos(yaw);
    CoordinateType temp16 = sin(pitch)*sin(roll);
    CoordinateType temp17 = sin(pitch)*cos(roll);
    CoordinateType temp18 = cos(pitch)*sin(roll)*sin(yaw);
    CoordinateType temp19 = cos(pitch)*cos(roll)*sin(yaw);
    CoordinateType temp20 = sin(pitch)*sin(yaw);
    CoordinateType temp21 = (cos(roll)*sin(yaw)-sin(pitch)*sin(roll)*cos(yaw));
    CoordinateType temp22 = cos(pitch)*cos(roll);
    CoordinateType temp23 = cos(pitch)*sin(roll);
    CoordinateType temp24 = cos(pitch);

    #if ENABLE_OPENMP_MULTITHREADING_ANALYTIC
    #pragma omp parallel for
    #endif
    for( int r=0; r<nRows; r++ )
    {
      for( int c=0; c<nCols; c++ )
      {
        int i = nCols*r+c; //vector index

        //Compute the 3D coordinates of the pij of the source frame
        Vector4Type point3D;
        point3D(2) = source_depthImg( r, c );
        if( m_MinDepth < point3D(2) && point3D(2) < m_MaxDepth )//Compute the jacobian only for the valid points
        {
          point3D(0) = (c - ox) * point3D(2) * inv_fx;
          point3D(1) = (r - oy) * point3D(2) * inv_fy;
          point3D(3) = 1.0;

          CoordinateType px = point3D(0);
          CoordinateType py = point3D(1);
          CoordinateType pz = point3D(2);

          //Transform the 3D point using the transformation matrix Rt
          Vector4Type transformedPoint3D = Rt*point3D;

          //Project the 3D point to the 2D plane
          CoordinateType inv_transformedPz = 1.0 / transformedPoint3D(2);
          CoordinateType transformed_c = (transformedPoint3D(0) * fx) * inv_transformedPz + ox; //transformed x (2D)
          CoordinateType transformed_r = (transformedPoint3D(1) * fy) * inv_transformedPz + oy; //transformed y (2D)
          int transformed_r_int = static_cast< int >( round( transformed_r ) );
          int transformed_c_int = static_cast< int >( round( transformed_c ) );

          //Asign the intensity value to the warped image and compute the difference between the transformed
          //pixel of frame 1 and the corresponding pixel of frame 2. Compute the error function
          // NOTE(review): bitwise '&' joins the two bool sub-expressions; correct
          // here (both operands are safe to evaluate) but '&&' is the conventional form.
          if( ( transformed_r_int >= 0 && transformed_r_int < nRows ) &
              ( transformed_c_int >= 0 && transformed_c_int < nCols ) )
          {
            //Obtain the pixel values that will be used to compute the pixel residual
            // pixel1: Intensity value of the pixel(r,c) of the warped frame 1
            // pixel2: Intensity value of the pixel(r,c) of frame 2
            CoordinateType pixel1 = source_grayImg( r, c );
            CoordinateType pixel2 = target_grayImg( transformed_r_int, transformed_c_int );

            //Compute the pixel jacobian
            Numeric::FixedMatrixRowMajor< CoordinateType, 2, 6 > jacobianPrRt;
            CoordinateType temp25 = 1.0/(z+py*temp1+pz*temp2-px*temp3); // 1 / transformed depth
            CoordinateType temp26 = temp25*temp25;

            //Derivative with respect to x
            jacobianPrRt(0,0) = fx*temp25;
            jacobianPrRt(1,0) = 0.0;

            //Derivative with respect to y
            jacobianPrRt(0,1) = 0.0;
            jacobianPrRt(1,1) = fy*temp25;

            //Derivative with respect to z
            jacobianPrRt(0,2) = -fx*(pz*temp4+py*temp5+px*temp11)*temp26;
            jacobianPrRt(1,2) = -fy*(py*temp6+pz*temp9+px*temp14+y)*temp26;

            //Derivative with respect to yaw
            jacobianPrRt(0,3) = fx*(py*temp7+pz*temp8-px*temp14)*temp25;
            jacobianPrRt(1,3) = fy*(pz*temp4+py*temp5+px*temp15)*temp25;

            //Derivative with respect to pitch
            jacobianPrRt(0,4) = fx*(py*temp10+pz*temp12-px*temp13)*temp25
                                -fx*(-py*temp16-pz*temp17-px*temp24)*(pz*temp4+py*temp5+px*temp11)*temp26;
            jacobianPrRt(1,4) = fy*(py*temp18+pz*temp19-px*temp20)*temp25
                                -fy*(-py*temp16-pz*temp17-px*temp24)*(py*temp6+pz*temp9+px*temp14+y)*temp26;

            //Derivative with respect to roll
            jacobianPrRt(0,5) = fx*(py*temp4+pz*temp21)*temp25
                                -fx*(py*temp22-pz*temp23)*(pz*temp4+py*temp5+px*temp11)*temp26;
            jacobianPrRt(1,5) = fy*(pz*temp7+py*temp9)*temp25
                                -fy*(py*temp22-pz*temp23)*(py*temp6+pz*temp9+px*temp14+y)*temp26;

            //Apply the chain rule to compound the image gradients with the projective+RigidTransform jacobians
            Numeric::FixedRowVector< CoordinateType, 2 > target_imgGradient;
            target_imgGradient(0) = target_gradXImg(i);
            target_imgGradient(1) = target_gradYImg(i);
            Numeric::FixedRowVector< CoordinateType, 6 > jacobian = target_imgGradient*jacobianPrRt;

            //Assign the pixel residual and jacobian to its corresponding row
            // NOTE(review): the jacobian is stored at the *source* pixel index i,
            // while the residual is stored at the *warped* pixel index — confirm
            // this row mismatch is intentional before relying on J^T r below.
            jacobians(i,0)=jacobian(0,0);
            jacobians(i,1)=jacobian(0,1);
            jacobians(i,2)=jacobian(0,2);
            jacobians(i,3)=jacobian(0,3);
            jacobians(i,4)=jacobian(0,4);
            jacobians(i,5)=jacobian(0,5);

            residuals( nCols * transformed_r_int + transformed_c_int , 0 ) = pixel2 - pixel1;

            if( m_VisualizeIterations )
            {
              warped_source_grayImage( transformed_r_int, transformed_c_int ) = pixel1;
            }
          }
        }
      }
    }
  }

  /*!Reasons for stopping the per-level optimization loop.*/
  enum TerminationCriteriaType
  {
    NonTerminated = -1,
    MaxIterationsReached = 0,
    GradientNormLowerThanThreshold = 1
  };

  /*!Returns true when the current level's iteration budget is exhausted or the
  gradient norm drops below the configured per-level threshold.*/
  bool TestTerminationCriteria() const
  {
    bool optimizationFinished = false;

    CoordinateType gradientNorm = m_Gradients.norm();

    TerminationCriteriaType terminationCriteria = NonTerminated;

    if( m_Iteration >= m_MaxNumIterations[ m_OptimizationLevel ] )
    {
      terminationCriteria = MaxIterationsReached;
      optimizationFinished = true;
    }
    else if( gradientNorm < m_MinGradientNorms[ m_OptimizationLevel ] )
    {
      terminationCriteria = GradientNormLowerThanThreshold;
      optimizationFinished = true;
    }

    if( optimizationFinished )
    {
      #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
      std::cout << "----------------------------------------" << std::endl;
      std::cout << "Optimization level: " << m_OptimizationLevel << std::endl;
      std::cout << "Termination criteria: ";
      #endif
      switch( terminationCriteria )
      {
        case MaxIterationsReached:
          #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
          std::cout << " Max number of iterations reached (" << m_MaxNumIterations[ m_OptimizationLevel ] << ")" << std::endl;;
          #endif
          break;
        case GradientNormLowerThanThreshold:
          #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
          std::cout << " Gradient norm is lower than threshold (" << m_MinGradientNorms[ m_OptimizationLevel ] << ")" << std::endl;
          #endif
          break;
        default :
          break;
      }
      #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
      std::cout << "Number iterations: " << m_Iteration << std::endl;
      std::cout << "gradient norm: " << gradientNorm << std::endl;
      std::cout << "----------------------------------------" << std::endl;
      #endif
    }

    return optimizationFinished;
  }

public:

  CPhotoconsistencyOdometryAnalytic() :
    m_MinDepth( 0.3 ), m_MaxDepth( 5.0 )
  {
    m_StateVector.setZero();

    // NOTE(review): the hard-coded indices [2..4] assume at least 5 levels;
    // ReadConfigurationFile may later shrink m_NumOptimizationLevels — confirm.
    m_NumOptimizationLevels = 5;
    m_BlurFilterSizes.resize( m_NumOptimizationLevels, 0 );
    m_ImageGradientsScalingFactors.resize( m_NumOptimizationLevels, 0.0625 );
    m_LambdaOptimizationSteps.resize( m_NumOptimizationLevels, 1. );
    m_MaxNumIterations.resize( m_NumOptimizationLevels, 0 );
    m_MaxNumIterations[ 2 ] = 5;
    m_MaxNumIterations[ 3 ] = 20;
    m_MaxNumIterations[ 4 ] = 50;
    m_MinGradientNorms.resize( m_NumOptimizationLevels, 300. );
    m_VisualizeIterations = false;
  }

  ~CPhotoconsistencyOdometryAnalytic(){}

  /*!Sets the minimum depth distance (m) to consider a certain pixel valid.*/
  void SetMinDepth( const CoordinateType minD )
  {
    m_MinDepth = minD;
  }

  /*!Sets the maximum depth distance (m) to consider a certain pixel valid.*/
  void SetMaxDepth( const CoordinateType maxD )
  {
    m_MaxDepth = maxD;
  }

  /*!Sets the 3x3 intrinsic camera matrix*/
  void SetIntrinsicMatrix( const Matrix33Type & intrinsicMatrix )
  {
    m_IntrinsicMatrix = intrinsicMatrix;
  }

  /*!Sets the source (Intensity+Depth) frame.*/
  void SetSourceFrame( const IntensityImageType & intensityImage,
                       const DepthImageType & depthImage )
  {
    //Create an auxiliary image from the input image (normalized to [0,1])
    InternalIntensityImageType intensityImageAux;
    intensityImage.convertTo( intensityImageAux, depthImage.type(), 1./255 );

    //Compute image pyramids for the grayscale and depth images
    BuildPyramid( intensityImageAux, m_IntensityPyramid0, m_NumOptimizationLevels, true );
    BuildPyramid( depthImage, m_DepthPyramid0, m_NumOptimizationLevels, false ); //the depth image with no blur
  }

  /*!Sets the target (Intensity+Depth) frame. Depth image is ignored*/
  void SetTargetFrame( const IntensityImageType & intensityImage,
                       const DepthImageType & depthImage )
  {
    //Create an auxiliary image from the input image (normalized to [0,1])
    InternalIntensityImageType intensityImageAux;
    intensityImage.convertTo( intensityImageAux, depthImage.type(), 1./255 );

    //Compute image pyramids for the grayscale and depth images
    BuildPyramid( intensityImageAux, m_IntensityPyramid1, m_NumOptimizationLevels, true );

    //Compute image pyramids for the gradients images
    BuildDerivativesPyramids( m_IntensityPyramid1, m_IntensityGradientXPyramid1, m_IntensityGradientYPyramid1 );
  }

  /*!Initializes the state vector to a certain value. The optimization process uses
  the initial state vector as the initial estimate.*/
  void SetInitialStateVector( const Vector6Type & initialStateVector )
  {
    m_StateVector = initialStateVector;
  }

  /*!Launches the least-squares optimization process to find the configuration of the
  state vector parameters that maximizes the photoconsistency between the source and target frame.*/
  void Optimize()
  {
    // coarse-to-fine: start at the smallest level, refine down to level 0
    for( m_OptimizationLevel = m_NumOptimizationLevels-1;
         m_OptimizationLevel >= 0; m_OptimizationLevel-- )
    {
      int nRows = m_IntensityPyramid0[ m_OptimizationLevel ].rows;
      int nCols = m_IntensityPyramid0[ m_OptimizationLevel ].cols;
      int nPoints = nRows * nCols;

      m_Iteration = 0;

      while(true)
      {
        #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
        cv::TickMeter tm;tm.start();
        #endif

        InternalIntensityImageType warpedSourceIntensityImage;
        if( m_VisualizeIterations )
          warpedSourceIntensityImage = InternalIntensityImageType::zeros( nRows, nCols );

        Numeric::RowDynamicMatrixColMajor< CoordinateType, 1 > residuals;
        residuals.resize( nPoints, Eigen::NoChange );
        residuals.setZero();
        Numeric::RowDynamicMatrixColMajor< CoordinateType, 6 > jacobians;
        jacobians.resize( nPoints, Eigen::NoChange );
        jacobians.setZero();

        if( m_MaxNumIterations[ m_OptimizationLevel] > 0 ) //compute only if the number of maximum iterations are greater than 0
        {
          ComputeResidualsAndJacobians(
            m_IntensityPyramid0[ m_OptimizationLevel ],
            m_DepthPyramid0[ m_OptimizationLevel ],
            m_IntensityPyramid1[ m_OptimizationLevel ],
            m_IntensityGradientXPyramid1[ m_OptimizationLevel ],
            m_IntensityGradientYPyramid1[ m_OptimizationLevel ],
            residuals, jacobians, warpedSourceIntensityImage );

          m_Gradients = jacobians.transpose()*residuals;

          // damped Gauss-Newton update: x <- x - lambda * (J^T J)^-1 * J^T r
          m_StateVector = m_StateVector - m_LambdaOptimizationSteps[ m_OptimizationLevel ] *
            ((jacobians.transpose()*jacobians).inverse() * m_Gradients );

          #if ENABLE_PRINT_CONSOLE_OPTIMIZATION_PROGRESS
          tm.stop();
          std::cout << "Iteration time = " << tm.getTimeSec() << " sec." << std::endl;
          #endif
        }

        m_Iteration++;

        if( TestTerminationCriteria() ){break;}

        if( m_VisualizeIterations )
        {
          InternalIntensityImageType imgDiff = InternalIntensityImageType::zeros( nRows, nCols );
          cv::absdiff( m_IntensityPyramid1[ m_OptimizationLevel ], warpedSourceIntensityImage, imgDiff );
          cv::imshow("optimize::imgDiff",imgDiff);
          cv::waitKey(0);
        }
      }
    }

    //After all the optimization process the optimization level is 0
    m_OptimizationLevel = 0;
  }

  /*!Returns the optimal state vector. This method has to be called after calling the Optimize() method.*/
  Vector6Type GetOptimalStateVector() const
  {
    return m_StateVector;
  }

  /*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame.
  This method has to be called after calling the Optimize() method.*/
  Matrix44Type GetOptimalRigidTransformationMatrix() const
  {
    Matrix44Type Rt;
    eigenPose( m_StateVector(0), m_StateVector(1), m_StateVector(2),
               m_StateVector(3), m_StateVector(4), m_StateVector(5), Rt );
    return Rt;
  }

  /*!Reads the configuration parameters from a .yml file.*/
  void ReadConfigurationFile( const std::string & fileName )
  {
    cv::FileStorage fs( fileName, cv::FileStorage::READ );

    //Read the number of optimization levels
    fs["numOptimizationLevels"] >> m_NumOptimizationLevels;

    #if ENABLE_GAUSSIAN_BLUR || ENABLE_BOX_FILTER_BLUR
    //Read the blur filter size at every pyramid level
    fs["blurFilterSize (at each level)"] >> m_BlurFilterSizes;
    #endif

    //Read the scaling factor for each gradient image at each level
    fs["imageGradientsScalingFactor (at each level)"] >> m_ImageGradientsScalingFactors;

    //Read the lambda factor to change the optimization step
    fs["lambda_optimization_step (at each level)"] >> m_LambdaOptimizationSteps;

    //Read the number of Levenberg-Marquardt iterations at each optimization level
    fs["max_num_iterations (at each level)"] >> m_MaxNumIterations;

    //Read optimizer minimum gradient norm at each level
    fs["min_gradient_norm (at each level)"] >> m_MinGradientNorms;

    //Read the boolean value to determine if visualize the progress images or not
    fs["visualizeIterations"] >> m_VisualizeIterations;
  }

};

} //end namespace Analytic

} //end namespace phovo

#endif
8_data-env2.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>

/* OpenMP data-environment demo: `x` is declared before the parallel
 * region, so by the OpenMP default data-sharing rules it is SHARED by
 * all threads.  Every thread stores its own thread id into the single
 * shared `x` and then prints it — a deliberate data race: between a
 * thread's store and its printf another thread may have overwritten
 * `x`, so the two %d values on one output line may differ.  Output
 * order and values are therefore nondeterministic by design.
 * (The message is Portuguese: "I am thread %d, my value of x is %d".)
 */
int main(int argc, char** argv)
{
    int x = 100;  /* initial value; clobbered by every thread below */

    /* request 20 threads for the following parallel region */
    omp_set_num_threads(20);

    #pragma omp parallel
    {
        /* racy write to the shared variable */
        x = omp_get_thread_num();
        printf("Sou a thread %d, meu valor de x é %d\n", omp_get_thread_num(), x);
    }

    return 0;
}
GB_unaryop__abs_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint8_uint8
// op(A') function: GB_tran__abs_uint8_uint8

// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij (abs of an unsigned value is the identity)

// type of the A matrix entries
#define GB_ATYPE \
    uint8_t

// type of the C matrix entries
#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// access the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (identity, see note above)
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator entrywise to the anz entries of Ax, writing into Cx,
// parallelized over nthreads.  Returns GrB_NO_VALUE when the operator/type
// pair is compiled out (GB_DISABLE), so the caller falls back to the
// generic-case kernel.
GrB_Info GB_unop__abs_uint8_uint8
(
    uint8_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is generated by the shared template GB_unaryop_transpose.c using
// the macros defined above; this file only selects phase 2 of 2.
GrB_Info GB_tran__abs_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif